source | python
client_start.py
|
from PyQt5 import QtWidgets, QtGui, QtCore, uic
from PyQt5.QtCore import Qt, QThread, pyqtSlot
import time
import ctypes
import threading
from client import Client
from handlers import GuiReciever
from repo.server_repo import DbRepo
from repo.server_models import Base
import sys
class MyWindow(QtWidgets.QPushButton):
def __init__(self):
QtWidgets.QPushButton.__init__(self)
def load_data(self, sp):
for i in range(1, 11):
time.sleep(0.1)
sp.showMessage("Загрузка данных...{}%".format(i * 10), QtCore.Qt.AlignHCenter | QtCore.Qt.AlignBottom,
QtCore.Qt.black)
QtWidgets.qApp.processEvents()
class AuthWindow(object):
def __init__(self):
app = QtWidgets.QApplication(sys.argv)
splash = QtWidgets.QSplashScreen(QtGui.QPixmap('img.jpg'))
splash.showMessage("Загрузка данных...{}%", QtCore.Qt.AlignHCenter | QtCore.Qt.AlignBottom, QtCore.Qt.black)
splash.show()
QtWidgets.qApp.processEvents()
window = MyWindow()
window.load_data(splash)
window = uic.loadUi('auth.ui')
main_window = uic.loadUi('main.ui')
main_window.show()
main_window.hide()
window.show()
splash.finish(window)
cl_list = []
repo = DbRepo('server.db', Base)
tess = repo.get_clients()
for el in tess:
cl_list.append(el.Name)
def match(self):
text = window.textEditMessage.toPlainText()
print(text)
print(cl_list)
try:
if text not in cl_list:
raise ValueError('No such user')
ctypes.windll.user32.MessageBoxW(0, "Hello, {}".format(text), "Greeting", 0)
print('hello')
# main_window.show()
print('yes')
window.close()
ter = MainWindow(text, main_window)
# ter()
except Exception as e:
print(e)
print('No such user')
ctypes.windll.user32.MessageBoxW(0, "No such user", "Error", 0)
window.pushOk.clicked.connect(match)
quitAction = QtWidgets.QAction("Quit", None)
quitAction.triggered.connect(app.quit)
window.pushOk.addAction(quitAction)
sys.exit(app.exec())
class MainWindow(object):
def __init__(self, name, form):
self.name = name
self.window = form
# def __call__(self, *args, **kwargs):
# app = QtWidgets.QApplication(sys.argv)
print(self.name)
self.window.setWindowTitle(self.name)
client = Client(name=self.name)
# get the contact list from the server; the local copy is not reliable
client.connect()
listener = GuiReciever(client.socket, client.request_queue)
# Connect the signal to the slot
# slot: update the data in the message list
@pyqtSlot(str)
def update_chat(data):
''' Display a message in the chat history
'''
try:
msg = data
self.window.listWidgetMessages.addItem(msg)
except Exception as e:
print(e)
# the signal comes from our GuiReciever
print('Connecting signal to slot')
listener.gotData.connect(update_chat)
print('Signal connected')
# QThread is the recommended approach, though a plain thread would also work
# th_listen = threading.Thread(target=listener.poll)
# th_listen.daemon = True
# th_listen.start()
self.th = QThread()
# pool_th = QThread()
listener.moveToThread(self.th)
# # ---------- Important part: connecting signals and slots ----------
# When the thread starts, listener.poll will be called
# pool_th.started.connect(listener.poll)
# pool_th.start()
self.th.started.connect(listener.poll)
self.th.start()
contact_list = client.get_contacts()
def load_contacts(contacts):
"""загрузка контактов в список"""
# чистим список
self.window.listWidgetContacts.clear()
# добавляем
for contact in contacts:
self.window.listWidgetContacts.addItem(contact)
# грузим контакты в список сразу при запуске приложения
load_contacts(contact_list)
def add_contact():
"""Добавление контакта"""
# Получаем имя из QTextEdit
username = self.window.textEditUsername.toPlainText()
if username:
# добавляем контакт - шлем запрос на сервер ...
client.add_contact(username)
# добавляем имя в QListWidget
self.window.listWidgetContacts.addItem(username)
# Связываем сигнал нажатия кнопки добавить со слотом функцией добавить контакт
self.window.pushButtonAddContact.clicked.connect(add_contact)
def del_contact():
"""Удаление контакта"""
# получаем выбранный элемент в QListWidget
current_item = self.window.listWidgetContacts.currentItem()
# получаем текст - это имя нашего контакта
username = current_item.text()
# удаление контакта (отправляем запрос на сервер)
client.del_contact(username)
# удаляем контакт из QListWidget
# window.listWidgetContacts.removeItemWidget(current_item) - так не работает
# del current_item
# Так норм удаляется, может быть можно как то проще
current_item = self.window.listWidgetContacts.takeItem(self.window.listWidgetContacts.row(current_item))
del current_item
# связываем сигнал нажатия на кнопку и слот функцию удаления контакта
self.window.pushButtonDelContect.clicked.connect(del_contact)
def open_chat():
try:
# load the chat QDialog
dialog = uic.loadUi('chat.ui')
@pyqtSlot(str)
def update_chat(data):
''' Display a message in the chat history
'''
try:
msg = data
dialog.listWidgetMessages.addItem(msg)
except Exception as e:
print(e)
# the signal comes from our GuiReciever
listener.gotData.connect(update_chat)
# get the selected user
selected_index = self.window.listWidgetContacts.currentIndex()
# get the user name
user_name = selected_index.data()
# set the name in the window title
dialog.setWindowTitle('Chat with {}'.format(user_name))
# sending a message
def send_message():
text = dialog.textEditMessage.toPlainText()
if text:
print(client)
client.send_message(user_name, text)
# # show what we sent in the shared chat
msg = '{} >>> {}'.format(self.name, text)
dialog.listWidgetMessages.addItem(msg)
# bind sending to the OK button
dialog.pushOk.clicked.connect(send_message)
# run in modal mode
# wire up the modal window's events (for demonstration)
# dialog.pushOk.clicked.connect(dialog.accept)
dialog.exec()
except Exception as e:
print(e)
# We cannot yet pass the clicked item here - do it next time via subclassing
self.window.listWidgetContacts.itemDoubleClicked.connect(open_chat)
# Context menu on right mouse click (test version, for demonstration)
# Created on the list widget
self.window.listWidgetContacts.setContextMenuPolicy(Qt.CustomContextMenu)
self.window.listWidgetContacts.setContextMenuPolicy(Qt.ActionsContextMenu)
quitAction = QtWidgets.QAction("Quit", None)
# quitAction.triggered.connect(app.quit)
self.window.listWidgetContacts.addAction(quitAction)
print('Done')
# draw the window
self.window.show()
# application entry point
# sys.exit(app.exec_())
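# A minimal sketch of the QThread + moveToThread pattern used in
# MainWindow.__init__ above, shown in isolation. The Worker class here is
# hypothetical; GuiReciever plays that role in the real code.
#
#   class Worker(QtCore.QObject):
#       gotData = QtCore.pyqtSignal(str)
#       def poll(self):
#           while True:
#               time.sleep(1)
#               self.gotData.emit('tick')    # delivered to the GUI thread
#
#   worker = Worker()
#   th = QThread()
#   worker.moveToThread(th)                  # worker's slots now run in th
#   th.started.connect(worker.poll)          # poll() starts with the thread
#   worker.gotData.connect(print)            # cross-thread signal -> queued slot
#   th.start()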
if __name__ == '__main__':
auth = AuthWindow()
|
thread.py
|
# curio/thread.py
#
# Not your parent's threading
__all__ = [ 'AWAIT', 'async_thread', 'spawn_thread' ]
# -- Standard Library
import threading
from concurrent.futures import Future
from functools import wraps
from inspect import iscoroutine, isgenerator
from contextlib import contextmanager
from types import coroutine
import sys
# -- Curio
from . import sync
from .task import spawn, disable_cancellation, check_cancellation, set_cancellation, current_task
from .traps import _future_wait, _scheduler_wait, _scheduler_wake
from . import errors
from . import io
from . import sched
from . import meta
_locals = threading.local()
class AsyncThread(object):
def __init__(self, target, args=(), kwargs={}, daemon=False):
self.target = target
self.args = args
self.kwargs = kwargs
self.daemon = daemon
self._request = Future()
self._done_evt = threading.Event()
self._terminate_evt = sync.UniversalEvent()
self._coro = None
self._result_value = None
self._result_exc = None
self._thread = None
self._task = None
self._joined = False
async def _coro_runner(self):
while True:
# Wait for a hand-off
await disable_cancellation(_future_wait(self._request))
coro = self._request.result()
self._request = Future()
# If no coroutine, we're shutting down
if not coro:
break
# Run the coroutine
try:
self._result_value = await coro
self._result_exc = None
except BaseException as e:
self._result_value = None
self._result_exc = e
# Hand it back to the thread
coro = None
self._done_evt.set()
await self._terminate_evt.set()
def _func_runner(self):
_locals.thread = self
try:
self._result_value = self.target(*self.args, **self.kwargs)
self._result_exc = None
except BaseException as e:
self._result_value = None
self._result_exc = e
finally:
self._request.set_result(None)
async def start(self):
self._task = await spawn(self._coro_runner, daemon=True)
self._thread = threading.Thread(target=self._func_runner, daemon=True)
self._thread.start()
def AWAIT(self, coro):
self._request.set_result(coro)
self._done_evt.wait()
self._done_evt.clear()
if self._result_exc:
raise self._result_exc
else:
return self._result_value
async def join(self):
await self._terminate_evt.wait()
if self._result_exc:
raise errors.TaskError() from self._result_exc
else:
return self._result_value
async def wait(self):
await self._terminate_evt.wait()
self._joined = True
@property
def result(self):
if not self._terminate_evt.is_set():
raise RuntimeError('Thread not terminated')
if self._result_exc:
raise self._result_exc
else:
return self._result_value
async def cancel(self, *, exc=errors.TaskCancelled):
await self._task.cancel(exc=exc)
await self.wait()
def AWAIT(coro, *args, **kwargs):
'''
Await a coroutine in an asynchronous thread. If coro is
not a proper coroutine, this function acts as a no-op, returning coro.
'''
# If the coro is a callable and it's identifiable as a coroutine function,
# wrap it inside a coroutine and pass that.
if callable(coro):
if meta.iscoroutinefunction(coro) and hasattr(_locals, 'thread'):
async def _coro(coro):
return await coro(*args, **kwargs)
coro = _coro(coro)
else:
coro = coro(*args, **kwargs)
if iscoroutine(coro) or isgenerator(coro):
if hasattr(_locals, 'thread'):
return _locals.thread.AWAIT(coro)
else:
raise errors.AsyncOnlyError('Must be used as async')
else:
return coro
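# A minimal sketch of how AWAIT() is used from a synchronous function that
# runs inside an AsyncThread (assumes curio is importable; the worker and
# its argument are made up for illustration):
#
#   import curio
#   from curio.thread import spawn_thread, AWAIT
#
#   def worker(n):
#       # runs in a real OS thread; AWAIT() hands coroutines back to the
#       # AsyncThread's coroutine runner
#       AWAIT(curio.sleep(n))
#       return n * 2
#
#   async def main():
#       t = await spawn_thread(worker, 1)
#       print(await t.join())    # -> 2
#
#   curio.run(main)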
def _check_async_thread_block(code, offset, _cache={}):
'''
Analyze a given async_thread() context manager block to make sure it doesn't
contain any await operations
'''
if (code, offset) in _cache:
return _cache[code, offset]
import dis
instr = dis.get_instructions(code)
level = 0
result = True
for op in instr:
if op.offset < offset:
continue
if op.opname == 'SETUP_ASYNC_WITH':
level += 1
if op.opname in { 'YIELD_FROM', 'YIELD_VALUE' } and level > 0:
result = False
break
if op.opname == 'WITH_CLEANUP_START':
level -= 1
if level == 0:
break
_cache[code, offset] = result
return result
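# For reference, the kind of block the check above is guarding (a sketch;
# "queue" and "read_blocking" are made-up names): plain synchronous calls
# and explicit AWAIT() are allowed inside "async with spawn_thread():",
# while a bare await inside the block is rejected when the block is entered.
#
#   async with spawn_thread():
#       data = read_blocking()          # ok: ordinary synchronous code
#       AWAIT(queue.put(data))          # ok: explicit AWAIT()
#       # await queue.put(data)         # not ok: RuntimeError on entry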
@coroutine
def _async_thread_exit():
yield _AsyncContextManager
class _AsyncContextManager:
async def __aenter__(self):
frame = sys._getframe(1)
if not _check_async_thread_block(frame.f_code, frame.f_lasti):
raise RuntimeError("async/await not allowed inside an async_thread context block")
# Check if we've been cancelled prior to launching any thread
await check_cancellation()
# We need to arrange a handoff of execution to a separate
# thread. It's a bit tricky, but the gist of the idea is that
# an AsyncThread is launched and made to wait for an event.
# The current task suspends itself on a throwaway scheduling
# barrier and arranges for the event to be set on suspension.
# The thread will then take over and drive the task through a
# single execution cycle on the thread.
self.task = await current_task()
self.start = sync.UniversalEvent()
self.thread = AsyncThread(target=self._runner)
self.barrier = sched.SchedBarrier()
await self.thread.start()
self.task.suspend_func = lambda: self.start.set()
await _scheduler_wait(self.barrier, 'ASYNC_CONTEXT_ENTER')
async def __aexit__(self, ty, val, tb):
# This method is not executed by Curio proper. Instead, the _runner()
# method below drives the coroutine into here. The await statement that
# follows causes the coroutine to stop at this point. _runner() will
# arrange to have the task resume normal execution in its original thread
await _async_thread_exit()
def _runner(self):
# Wait for the task to be suspended
self.start.wait()
# This coroutine stands in for the original task. The primary purpose
# of this is to make cancellation/timeout requests work properly. We'll
# wait for the _runner() thread to terminate. If it gets cancelled,
# we cancel the associated thread.
async def _waiter():
try:
await self.thread.join()
except errors.CancelledError as e:
await self.thread.cancel(exc=e)
# The original task needs to come back
self.task._switch(orig_coro)
self.task.cancel_pending = self.thread._task.cancel_pending
# This never returns... the task resumes immediately, but it's back
# in its original coroutine.
await current_task()
# Context-switch the Task to run a different coroutine momentarily
orig_coro = self.task._switch(_waiter())
# Reawaken the task. It's going to wake up in the _waiter() coroutine above.
AWAIT(_scheduler_wake(self.barrier, 1))
# Run the code through a *single* send() cycle. It should end up on the
# await _async_thread_exit() operation above. It is critically important that
# no bare await/async operations occur in this process. The code should have
# been validated previously. It *IS* okay for AWAIT() operations to be used.
result = orig_coro.send(None)
# If the result is not the result of _async_thread_exit() above, then
# some kind of very bizarre runtime problem has been encountered.
if result is not _AsyncContextManager:
self.task._throw(RuntimeError('yielding not allowed in AsyncThread. Got %r' % result))
# At this point, the body of the context manager is done. We simply
# fall off the end of the function. The _waiter() coroutine will
# awaken and switch back to the original coroutine.
def spawn_thread(func=None, *args, daemon=False):
'''
Launch an async thread. This mimics the way a task is normally spawned. For
example:
t = await spawn_thread(func, arg1, arg2)
...
await t.join()
It can also be used as a context manager to launch a code block into a thread:
async with spawn_thread():
sync_op ...
sync_op ...
'''
if func is None:
return _AsyncContextManager()
else:
if iscoroutine(func) or meta.iscoroutinefunction(func):
raise TypeError("spawn_thread() can't be used on coroutines")
async def runner(args, daemon):
t = AsyncThread(func, args=args, daemon=daemon)
await t.start()
return t
return runner(args, daemon)
def async_thread(func=None, *, daemon=False):
'''
Decorator that is used to mark a callable as running in an asynchronous thread
'''
if func is None:
return lambda func: async_thread(func, daemon=daemon)
if meta.iscoroutinefunction(func):
raise TypeError("async_thread can't be applied to coroutines.")
@wraps(func)
def wrapper(*args, **kwargs):
if meta._from_coroutine() and not is_async_thread():
async def runner(*args, **kwargs):
# Launching async threads could result in a situation where
# synchronous code gets executed, but there is no opportunity
# for Curio to properly check for cancellation. This next
# call is a sanity check--if there's pending cancellation, don't
# even bother to launch the associated thread.
await check_cancellation()
t = AsyncThread(func, args=args, kwargs=kwargs, daemon=daemon)
await t.start()
try:
return await t.join()
except errors.CancelledError as e:
await t.cancel(exc=e)
await set_cancellation(t._task.cancel_pending)
if t._result_exc:
raise t._result_exc from None
else:
return t._result_value
except errors.TaskError as e:
raise e.__cause__ from None
return runner(*args, **kwargs)
else:
return func(*args, **kwargs)
wrapper._async_thread = True
return wrapper
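# A minimal sketch of the decorator above (the file path is hypothetical):
# the decorated function runs in an async thread when called from coroutine
# context, and runs normally when called from plain synchronous code.
#
#   @async_thread
#   def read_file(path):
#       with open(path) as f:
#           return f.read()
#
#   async def main():
#       data = await read_file('/etc/hostname')   # executes in a thread
#
#   data = read_file('/etc/hostname')              # direct call, no thread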
def is_async_thread():
'''
Returns True if current thread is an async thread.
'''
return hasattr(_locals, 'thread')
class ThreadProxy(object):
'''
Wrapper around an instance that turns all of its methods into async
methods
'''
def __init__(self, obj):
self._obj = obj
def __getattr__(self, name):
value = getattr(self._obj, name)
if callable(value):
return async_thread(value)
else:
return value
# Highly experimental. These classes allow synchronous code running in an
# async thread to manipulate an async socket as if it were a normal socket.
# Seriously, don't use these. It's something I'm fooling around with right now.
# -Dave
class ThreadSocket(object):
'''
You were warned.
'''
def __init__(self, asyncsock):
self._asyncsock = asyncsock
def __getattr__(self, name):
return getattr(self._asyncsock, name)
def dup(self):
raise RuntimeError()
def makefile(self, mode, buffering=0, *, encoding=None, errors=None, newline=None):
return ThreadFile(self._asyncsock.makefile(mode, buffering, encoding=encoding, errors=errors, newline=newline))
def settimeout(self, *args):
pass
def as_stream(self):
return ThreadFile(self._asyncsock.as_stream())
def recv(self, maxsize, flags=0):
return AWAIT(self._asyncsock.recv(maxsize, flags))
def recv_into(self, buffer, nbytes=0, flags=0):
return AWAIT(self._asyncsock.recv_into(buffer, nbytes, flags))
def send(self, data, flags=0):
return AWAIT(self._asyncsock.send(data, flags))
def sendall(self, data, flags=0):
return AWAIT(self._asyncsock.sendall(data, flags))
def accept(self):
client, addr = AWAIT(self._asyncsock.accept())
return (type(self)(client), addr)
def connect_ex(self, address):
return AWAIT(self._asyncsock.connect_ex(address))
def connect(self, address):
return AWAIT(self._asyncsock.connect(address))
def recvfrom(self, buffersize, flags=0):
return AWAIT(self._asyncsock.recvfrom(buffersize, flags))
def recvfrom_into(self, buffer, bytes=0, flags=0):
return AWAIT(self._asyncsock.recvfrom_into(buffer, bytes, flags))
def sendto(self, bytes, flags_or_address, address=None):
return AWAIT(self._asyncsock.sendto(bytes, flags_or_address, address))
def recvmsg(self, bufsize, ancbufsize=0, flags=0):
return AWAIT(self._asyncsock.recvmsg(bufsize, ancbufsize,flags))
def recvmsg_into(self, buffers, ancbufsize=0, flags=0):
return AWAIT(self._asyncsock.recvmsg_into(buffers, ancbufsize, flags))
def sendmsg(self, buffers, ancdata=(), flags=0, address=None):
return AWAIT(self._asyncsock.sendmsg(buffers, ancdata, flags, address))
def do_handshake(self):
return AWAIT(self._asyncsock.do_handshake())
def close(self):
return AWAIT(self._asyncsock.close())
def shutdown(self, how):
return AWAIT(self._asyncsock.shutdown(how))
def __enter__(self):
return self._asyncsock.__enter__()
def __exit__(self, *args):
return self._asyncsock.__exit__(*args)
class ThreadFile(object):
'''
You were repeatedly warned.
'''
def __init__(self, asyncfileobj):
self._asyncfile = asyncfileobj
def write(self, data):
return AWAIT(self._asyncfile.write(data))
# ---- General I/O methods for streams
def read(self, maxbytes=-1):
return AWAIT(self._asyncfile.read(maxbytes))
def readall(self):
return AWAIT(self._asyncfile.readall())
def read_exactly(self, nbytes):
return AWAIT(self._asyncfile.read_exactly(nbytes))
def readinto(self, buf):
return AWAIT(self._asyncfile.readinto(buf))
def readline(self, maxbytes=-1):
return AWAIT(self._asyncfile.readline())
def readlines(self):
return AWAIT(self._asyncfile.readlines())
def writelines(self, lines):
return AWAIT(self._asyncfile.writelines(lines))
def flush(self):
return AWAIT(self._asyncfile.flush())
def close(self):
return AWAIT(self._asyncfile.close())
def __iter__(self):
return self._asyncfile.__iter__()
def __enter__(self):
return self._asyncfile.__enter__()
def __exit__(self, *args):
return self._asyncfile.__exit__(*args)
@contextmanager
def more_magic():
'''
A vantage point from where you can see "highly experimental" down below if you squint.
'''
from socket import socket as socketcls
socketcls_new = socketcls.__new__
# Patch socket.socket()
def __new__(cls, *args, **kwargs):
sock = socketcls_new(cls, *args, **kwargs)
if not is_async_thread():
return sock
sock.__init__(*args, **kwargs)
self = ThreadSocket(io.Socket(sock))
return self
socketcls.__new__ = __new__
try:
yield
finally:
socketcls.__new__ = socketcls_new
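# A sketch of more_magic() in action (highly experimental, as warned above;
# the host and request bytes are made up): synchronous socket code run
# inside an async thread transparently gets a ThreadSocket wrapping curio's
# async socket.
#
#   import socket
#   import curio
#
#   @async_thread
#   def fetch(host):
#       s = socket.socket()                      # becomes a ThreadSocket here
#       s.connect((host, 80))
#       s.sendall(b'GET / HTTP/1.0\r\n\r\n')
#       return s.recv(1000)
#
#   async def main():
#       with more_magic():
#           print(await fetch('example.com'))
#
#   curio.run(main)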
|
server.py
|
##
# This module implements a general-purpose server layer for sfa.
# The same basic server should be usable on the registry, component, or
# other interfaces.
#
# TODO: investigate ways to combine this with existing PLC server?
##
import sys
import socket, os
import traceback
import xmlrpclib  # used by _dispatch below to report server-side faults
import threading
from Queue import Queue
import SocketServer
import BaseHTTPServer
import SimpleHTTPServer
import SimpleXMLRPCServer
from OpenSSL import SSL
from sfa.trust.certificate import Keypair, Certificate
from sfa.trust.trustedroots import TrustedRoots
from sfa.util.config import Config
from sfa.trust.credential import *
from sfa.util.faults import *
from sfa.plc.api import SfaAPI
from sfa.util.cache import Cache
from sfa.util.sfalogging import logger
##
# Verification callback for pyOpenSSL. We do our own checking of keys because
# we have our own authentication spec. Thus we disable several of the normal
# prohibitions that OpenSSL places on certificates
def verify_callback(conn, x509, err, depth, preverify):
# if the cert has been preverified, then it is ok
if preverify:
#print " preverified"
return 1
# the certificate verification done by openssl checks a number of things
# that we aren't interested in, so we look out for those error messages
# and ignore them
# XXX SMBAKER: I don't know what this error is, but it's being returned
# by newer pl nodes.
if err == 9:
#print " X509_V_ERR_CERT_NOT_YET_VALID"
return 1
# allow self-signed certificates
if err == 18:
#print " X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT"
return 1
# allow certs that don't have an issuer
if err == 20:
#print " X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY"
return 1
# allow chained certs with self-signed roots
if err == 19:
return 1
# allow certs that are untrusted
if err == 21:
#print " X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE"
return 1
# allow certs that are untrusted
if err == 27:
#print " X509_V_ERR_CERT_UNTRUSTED"
return 1
print " error", err, "in verify_callback"
return 0
##
# taken from the web (XXX find reference). Implements HTTPS xmlrpc request handler
class SecureXMLRpcRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
"""Secure XML-RPC request handler class.
It is very similar to SimpleXMLRPCRequestHandler but it uses HTTPS for transporting XML data.
"""
def setup(self):
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
def do_POST(self):
"""Handles the HTTPS POST request.
It was copied out from SimpleXMLRPCServer.py and modified to shutdown
the socket cleanly.
"""
try:
peer_cert = Certificate()
peer_cert.load_from_pyopenssl_x509(self.connection.get_peer_certificate())
self.api = SfaAPI(peer_cert = peer_cert,
interface = self.server.interface,
key_file = self.server.key_file,
cert_file = self.server.cert_file,
cache = self.cache)
# get arguments
request = self.rfile.read(int(self.headers["content-length"]))
remote_addr = (remote_ip, remote_port) = self.connection.getpeername()
self.api.remote_addr = remote_addr
response = self.api.handle(remote_addr, request, self.server.method_map)
except Exception, fault:
# This should only happen if the module is buggy
# internal error, report as HTTP server error
logger.log_exc("server.do_POST")
response = self.api.prepare_response(fault)
#self.send_response(500)
#self.end_headers()
# got a valid response
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown() # Modified here!
##
# Taken from the web (XXX find reference). Implements an HTTPS xmlrpc server
class SecureXMLRPCServer(BaseHTTPServer.HTTPServer,SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
def __init__(self, server_address, HandlerClass, key_file, cert_file, logRequests=True):
"""Secure XML-RPC server.
It is very similar to SimpleXMLRPCServer but it uses HTTPS for transporting XML data.
"""
logger.debug("SecureXMLRPCServer.__init__, server_address=%s, cert_file=%s"%(server_address,cert_file))
self.logRequests = logRequests
self.interface = None
self.key_file = key_file
self.cert_file = cert_file
self.method_map = {}
# add cache to the request handler
HandlerClass.cache = Cache()
#for compatibility with python 2.4 (centos53)
if sys.version_info < (2, 5):
SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
else:
SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self, True, None)
SocketServer.BaseServer.__init__(self, server_address, HandlerClass)
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey_file(key_file)
ctx.use_certificate_file(cert_file)
# If you wanted to verify certs against known CAs.. this is how you would do it
#ctx.load_verify_locations('/etc/sfa/trusted_roots/plc.gpo.gid')
config = Config()
trusted_cert_files = TrustedRoots(config.get_trustedroots_dir()).get_file_list()
for cert_file in trusted_cert_files:
ctx.load_verify_locations(cert_file)
ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verify_callback)
ctx.set_verify_depth(5)
ctx.set_app_data(self)
self.socket = SSL.Connection(ctx, socket.socket(self.address_family,
self.socket_type))
self.server_bind()
self.server_activate()
# _dispatch
#
# Convert an exception on the server to a full stack trace and send it to
# the client.
def _dispatch(self, method, params):
logger.debug("SecureXMLRPCServer._dispatch, method=%s"%method)
try:
return SimpleXMLRPCServer.SimpleXMLRPCDispatcher._dispatch(self, method, params)
except:
# can't use format_exc() as it is not available in jython yet
# (even in trunk).
type, value, tb = sys.exc_info()
raise xmlrpclib.Fault(1,''.join(traceback.format_exception(type, value, tb)))
# override this one from the python 2.7 code
# originally defined in class TCPServer
def shutdown_request(self, request):
"""Called to shutdown and close an individual request."""
# ----------
# the std python 2.7 code just attempts a request.shutdown(socket.SHUT_WR)
# this works fine with regular sockets
# However we are dealing with an instance of OpenSSL.SSL.Connection instead
# This one only supports shutdown(), and in addition this does not
# always perform as expected
# ---------- std python 2.7 code
try:
#explicitly shutdown. socket.close() merely releases
#the socket and waits for GC to perform the actual close.
request.shutdown(socket.SHUT_WR)
except socket.error:
pass #some platforms may raise ENOTCONN here
# ----------
except TypeError:
# we are dealing with an OpenSSL.Connection object,
# try to shut it down but never mind if that fails
try: request.shutdown()
except: pass
# ----------
self.close_request(request)
## From Active State code: http://code.activestate.com/recipes/574454/
# This is intended as a drop-in replacement for the ThreadingMixIn class in
# module SocketServer of the standard lib. Instead of spawning a new thread
# for each request, requests are processed by a pool of reusable threads.
class ThreadPoolMixIn(SocketServer.ThreadingMixIn):
"""
use a thread pool instead of a new thread on every request
"""
# XX TODO: Make this configurable
# config = Config()
# numThreads = config.SFA_SERVER_NUM_THREADS
numThreads = 25
allow_reuse_address = True # seems to fix socket.error on server restart
def serve_forever(self):
"""
Handle one request at a time until doomsday.
"""
# set up the threadpool
self.requests = Queue()
for x in range(self.numThreads):
t = threading.Thread(target = self.process_request_thread)
t.setDaemon(1)
t.start()
# server main loop
while True:
self.handle_request()
self.server_close()
def process_request_thread(self):
"""
obtain request from queue instead of directly from server socket
"""
while True:
SocketServer.ThreadingMixIn.process_request_thread(self, *self.requests.get())
def handle_request(self):
"""
simply collect requests and put them on the queue for the workers.
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
self.requests.put((request, client_address))
class ThreadedServer(ThreadPoolMixIn, SecureXMLRPCServer):
pass
##
# Implements an HTTPS XML-RPC server. Generally it is expected that SFA
# functions will take a credential string, which is passed to
# decode_authentication. Decode_authentication() will verify the validity of
# the credential, and verify that the user is using the key that matches the
# GID supplied in the credential.
class SfaServer(threading.Thread):
##
# Create a new SfaServer object.
#
# @param ip the ip address to listen on
# @param port the port to listen on
# @param key_file private key filename of registry
# @param cert_file certificate filename containing public key
# (could be a GID file)
def __init__(self, ip, port, key_file, cert_file,interface):
threading.Thread.__init__(self)
self.key = Keypair(filename = key_file)
self.cert = Certificate(filename = cert_file)
#self.server = SecureXMLRPCServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
self.server = ThreadedServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
self.server.interface=interface
self.trusted_cert_list = None
self.register_functions()
logger.info("Starting SfaServer, interface=%s"%interface)
##
# Register functions that will be served by the XMLRPC server. This
# function should be overridden by each descendant class.
def register_functions(self):
self.server.register_function(self.noop)
##
# Sample no-op server function. The no-op function decodes the credential
# that was passed to it.
def noop(self, cred, anything):
self.decode_authentication(cred)
return anything
##
# Execute the server, serving requests forever.
def run(self):
self.server.serve_forever()
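# A minimal usage sketch (hypothetical paths and port; the key and
# certificate/GID files must already exist, and the interface name depends
# on the deployment):
#
#   server = SfaServer("0.0.0.0", 12345,
#                      "/etc/sfa/server.key", "/etc/sfa/server.cert",
#                      "registry")
#   server.start()   # threading.Thread.start() -> run() -> serve_forever()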
|
icmp.py
|
'''ICMP echo ping.'''
import array
import random
import select
import socket
import struct
import sys
import threading
import time
import cping.protocols
import cping.utils
SOCKET_TYPE = socket.SOCK_RAW if sys.platform == 'win32' else socket.SOCK_DGRAM
class Ping(cping.protocols.Ping):
'''ICMP echo ping. The possible results:
* latency=x, error=False: ICMP echo reply
* latency=-1, error=False: timeout
'''
icmpv4_socket = icmpv6_socket = None
match_queue = []
def __init__(self, *args, **kwargs):
# Create the ICMP sockets if they haven't been already
if Ping.icmpv4_socket is None:
Ping.icmpv4_socket = socket.socket(socket.AF_INET, SOCKET_TYPE,
socket.IPPROTO_ICMP)
Ping.icmpv6_socket = socket.socket(socket.AF_INET6, SOCKET_TYPE,
socket.IPPROTO_ICMPV6)
# Begin listening on the ICMP sockets. Daemonized to exit with cping
threading.Thread(target=Ping.receiver, daemon=True).start()
super().__init__(*args, **kwargs)
@staticmethod
def receiver():
'''Monitors the ICMPv4 and ICMPv6 sockets for packets and attempts to
match them against `Ping.match_queue`.'''
icmp_sockets = [Ping.icmpv4_socket, Ping.icmpv6_socket]
while True:
# Block until there is at least one socket ready to be read
protocol_socket = select.select(icmp_sockets, [], [])[0][0]
# Strip the ICMP reply as macOS includes the IPv4 header in data
data = protocol_socket.recv(2048)[-(cping.utils.DATA_LENGTH + 8):]
# Checksum (2 bytes) ignored because IPv6 requires a pseudo-header
# that's too much work to calculate and is already calculated by the
# kernel. Identifier (2 bytes) ignored because Linux overwrites it,
# so the globally-unique sequence is being used for tracking.
data = data[:2] + b'\x00\x00\x00\x00' + data[6:]
# Obtain copy to avoid list length being changed while iterating
for expected, event in Ping.match_queue.copy():
if data == expected:
event.set()
break
def ping_loop(self, host):
try:
host_info = socket.getaddrinfo(host=host.address, port=None)[0]
except socket.gaierror:
host.status = 'Host resolution failed'
return
session = Session(4 if host_info[0] == socket.AF_INET else 6)
while not host.stop_signal.is_set():
request, reply = session.create_icmp_echo()
receive_event = threading.Event()
latency = -1
# Add the expected packet to the receiver queue
Ping.match_queue.append((reply, receive_event))
checkpoint = time.perf_counter()
try:
if host_info[0] == socket.AF_INET:
Ping.icmpv4_socket.sendto(request, host_info[4])
else:
Ping.icmpv6_socket.sendto(request, host_info[4])
if receive_event.wait(self.interval):
latency = time.perf_counter() - checkpoint
except OSError as exception:
host.status = str(exception)
break
finally:
# Remove from the queue
Ping.match_queue.remove((reply, receive_event))
host.add_result(latency)
# Block until signaled to continue
self.wait(host, latency)
class Session:
'''A ping session to a host.'''
sequence = -1
sequence_lock = threading.Lock()
def __init__(self, family):
'''Constructor.
Args:
family (int): The IP family of the host. IPv4 if `4`, else IPv6.
'''
self.family = 4 if family == 4 else 6
self.identifier = random.randrange(1, 2**16)
@staticmethod
def get_checksum(data):
'''Returns checksum of `data`. Not meant for ICMPv6 as that requires an IPv6
pseudo-header. ICMP checksum: www.ietf.org/rfc/rfc1071.html#section-4.1.'''
# 0-pad data of odd length
if len(data) % 2 == 1:
data += b'\x00'
# The sum of the data, split into 16-bit words
checksum = sum(array.array('H', data))
# End-around carry the sum to 16 bits
while checksum >> 16:
checksum = (checksum & 0xffff) + (checksum >> 16)
# One's complement of the sum, normalized to 16 bits
return struct.pack('!H', socket.htons(~checksum & 0xffff))
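# A small worked example of the folding above (illustrative only): with the
# 16-bit words 0xffff and 0x0002 the running sum is 0x10001; adding the
# carry back in gives 0x0002, and the final one's complement is 0xfffd.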
@staticmethod
def next_sequence():
'''Returns the next sequence, incrementing it by 1.'''
with Session.sequence_lock:
Session.sequence = (Session.sequence + 1) & 0xffff
return Session.sequence
def create_icmp_echo(self):
'''Returns tuple of an ICMP echo request and its expected reply (bytes).'''
sequence = Session.next_sequence()
data = cping.utils.generate_data()
# ICMP type field differs between ICMPv4 and ICMPv6
request_type, reply_type = (8, 0) if self.family == 4 else (128, 129)
# Checksum is calculated with the checksum in the header set to 0
request = struct.pack('!BBHHH', request_type, 0, 0, self.identifier,
sequence) + data
request = request[:2] + Session.get_checksum(request) + request[4:]
# Identifier ignored; see matching logic in `Ping.receiver`
reply = struct.pack('!BBHHH', reply_type, 0, 0, 0, sequence) + data
return request, reply
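# A minimal sketch of how the pair above is consumed by Ping.ping_loop:
# the request bytes go out on the ICMP socket, while the pre-computed reply
# bytes are what Ping.receiver compares normalized incoming packets against.
#
#   session = Session(4)
#   request, reply = session.create_icmp_echo()
#   # Ping.icmpv4_socket.sendto(request, sockaddr)
#   # ...then wait for an incoming packet whose normalized bytes == reply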
|
blockingclientproxy.py
|
'''
@author: Deniz Altinbuken, Emin Gun Sirer
@note: ConCoord Client Proxy
@copyright: See LICENSE
'''
import os, random, select, socket, sys, threading, time
import cPickle as pickle
from threading import Thread, Condition, Lock
from concoord.pack import *
from concoord.enums import *
from concoord.utils import *
from concoord.exception import *
from concoord.connection import ConnectionPool, Connection
from concoord.message import *
from concoord.pvalue import PValueSet
try:
import dns
import dns.resolver
import dns.exception
except:
print("Install dnspython: http://www.dnspython.org/")
class ReqDesc:
def __init__(self, clientproxy, args, token):
with clientproxy.lock:
# acquire a unique command number
self.commandnumber = clientproxy.commandnumber
clientproxy.commandnumber += 1
self.cm = create_message(MSG_CLIENTREQUEST, clientproxy.me,
{FLD_PROPOSAL: Proposal(clientproxy.me, self.commandnumber, args),
FLD_TOKEN: token,
FLD_CLIENTBATCH: False,
FLD_SENDCOUNT: 0})
self.starttime = time.time()
self.replyarrived = Condition(clientproxy.lock)
self.lastreplycr = -1
self.replyvalid = False
self.reply = None
self.sendcount = 0
def __str__(self):
return "Request Descriptor for cmd %d\nMessage %s\nReply %s" % (self.commandnumber, str(self.cm), self.reply)
class ClientProxy():
def __init__(self, bootstrap, timeout=60, debug=False, token=None):
self.debug = debug
self.timeout = timeout
self.domainname = None
self.token = token
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.setblocking(1)
self.writelock = Lock()
self.bootstraplist = self.discoverbootstrap(bootstrap)
if len(self.bootstraplist) == 0:
raise ConnectionError("No bootstrap found")
if not self.connecttobootstrap():
raise ConnectionError("Cannot connect to any bootstrap")
myaddr = findOwnIP()
myport = self.socket.getsockname()[1]
self.me = Peer(myaddr, myport, NODE_CLIENT)
self.commandnumber = random.randint(1, sys.maxint)
# synchronization
self.lock = Lock()
self.pendingops = {} # pending requests indexed by commandnumber
self.doneops = {} # requests that are finalized, indexed by command number
# spawn thread, invoke recv_loop
try:
recv_thread = Thread(target=self.recv_loop, name='ReceiveThread')
recv_thread.daemon = True
recv_thread.start()
except (KeyboardInterrupt, SystemExit):
self._graceexit()
def _getipportpairs(self, bootaddr, bootport):
for node in socket.getaddrinfo(bootaddr, bootport, socket.AF_INET, socket.SOCK_STREAM):
yield (node[4][0],bootport)
def getbootstrapfromdomain(self, domainname):
tmpbootstraplist = []
try:
answers = dns.resolver.query('_concoord._tcp.'+domainname, 'SRV')
for rdata in answers:
for peer in self._getipportpairs(str(rdata.target), rdata.port):
if peer not in tmpbootstraplist:
tmpbootstraplist.append(peer)
except (dns.resolver.NXDOMAIN, dns.exception.Timeout):
if self.debug: print "Cannot resolve name"
return tmpbootstraplist
def discoverbootstrap(self, givenbootstrap):
tmpbootstraplist = []
try:
for bootstrap in givenbootstrap.split(","):
bootstrap = bootstrap.strip()
# The bootstrap list is read only during initialization
if bootstrap.find(":") >= 0:
bootaddr,bootport = bootstrap.split(":")
for peer in self._getipportpairs(bootaddr, int(bootport)):
if peer not in tmpbootstraplist:
tmpbootstraplist.append(peer)
else:
self.domainname = bootstrap
tmpbootstraplist = self.getbootstrapfromdomain(self.domainname)
except ValueError:
if self.debug: print "bootstrap usage: ipaddr1:port1,ipaddr2:port2 or domainname"
self._graceexit()
return tmpbootstraplist
def connecttobootstrap(self):
connected = False
for boottuple in self.bootstraplist:
try:
self.socket.close()
self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.socket.connect(boottuple)
self.conn = Connection(self.socket)
self.conn.settimeout(CLIENTRESENDTIMEOUT)
self.bootstrap = boottuple
connected = True
if self.debug: print "Connected to new bootstrap: ", boottuple
break
except socket.error, e:
if self.debug: print "Socket.Error: ", e
continue
return connected
def trynewbootstrap(self):
if self.domainname:
self.bootstraplist = self.getbootstrapfromdomain(self.domainname)
else:
oldbootstrap = self.bootstraplist.pop(0)
self.bootstraplist.append(oldbootstrap)
return self.connecttobootstrap()
def invoke_command(self, *args):
# create a request descriptor
reqdesc = ReqDesc(self, args, self.token)
self.pendingops[reqdesc.commandnumber] = reqdesc
# send the request
with self.writelock:
self.conn.send(reqdesc.cm)
with self.lock:
try:
while not reqdesc.replyvalid:
reqdesc.replyarrived.wait()
except KeyboardInterrupt:
self._graceexit()
del self.pendingops[reqdesc.commandnumber]
if reqdesc.reply.replycode == CR_OK or reqdesc.reply.replycode == CR_UNBLOCK:
return reqdesc.reply.reply
elif reqdesc.reply.replycode == CR_EXCEPTION:
raise Exception(pickle.loads(reqdesc.reply.reply))
else:
print "Unexpected Client Reply Code: %d" % reqdesc.reply.replycode
def recv_loop(self, *args):
socketset = [self.socket]
while True:
try:
needreconfig = False
inputready,outputready,exceptready = select.select(socketset, [], socketset, 0)
for s in inputready:
reply = self.conn.receive()
if reply is None:
needreconfig = True
elif reply and reply.type == MSG_CLIENTREPLY:
reqdesc = self.pendingops[reply.inresponseto]
with self.lock:
if reply.replycode == CR_OK or reply.replycode == CR_EXCEPTION or reply.replycode == CR_UNBLOCK:
# actionable response, wake up the thread
if reply.replycode == CR_UNBLOCK:
assert reqdesc.lastreplycr == CR_BLOCK, "unblocked thread not previously blocked"
reqdesc.lastreplycr = reply.replycode
reqdesc.reply = reply
reqdesc.replyvalid = True
reqdesc.replyarrived.notify()
elif reply.replycode == CR_INPROGRESS or reply.replycode == CR_BLOCK:
# the thread is already waiting, no need to do anything
reqdesc.lastreplycr = reply.replycode
elif reply.replycode == CR_REJECTED or reply.replycode == CR_LEADERNOTREADY:
needreconfig = True
else:
print "should not happen -- unknown response type"
while needreconfig:
if not self.trynewbootstrap():
raise ConnectionError("Cannot connect to any bootstrap")
needreconfig = False
# check if we need to re-send any pending operations
for commandno,reqdesc in self.pendingops.iteritems():
if not reqdesc.replyvalid and reqdesc.lastreplycr != CR_BLOCK:
reqdesc.sendcount += 1
reqdesc.cm[FLD_SENDCOUNT] = reqdesc.sendcount
if not self.conn.send(reqdesc.cm):
needreconfig = True
continue
except KeyboardInterrupt:
self._graceexit()
def _graceexit(self):
os._exit(0)
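# A minimal usage sketch (Python 2; the bootstrap address and the command
# name "getvalue" are hypothetical and depend on the replicated object):
#
#   proxy = ClientProxy("127.0.0.1:14000")
#   result = proxy.invoke_command("getvalue", "somekey")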
|
pjit_test.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap, mesh, Mesh
from jax.experimental import global_device_array
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import (pjit, pjit_p, with_sharding_constraint,
SpecSync, FROM_GDA)
from jax.interpreters import pxla
from jax.interpreters import xla
from jax._src.lib import xla_client
from jax._src.util import prod, curry, unzip2
from jax.config import config
config.parse_flags_with_absl()
def setUpModule():
if jax.default_backend() not in {'gpu', 'tpu'}:
raise unittest.SkipTest("pjit only supports GPU and TPU backends")
jtu.set_spmd_lowering_flag(True)
def tearDownModule():
jtu.restore_spmd_lowering_flag()
def create_gda(global_shape, global_mesh, mesh_axes):
global_data = np.arange(
prod(global_shape), dtype=np.float32).reshape(global_shape)
return global_device_array.GlobalDeviceArray.from_callback(
global_shape, global_mesh, mesh_axes, lambda idx: global_data[idx])
@curry
def check_1d_2d_mesh(f, set_mesh):
return parameterized.named_parameters(
{"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
for name, mesh, resources in (
("2", (("x", 2),), "x"),
("2x1", (("x", 2), ("y", 1)), ("x", "y")),
("2x2", (("x", 2), ("y", 2)), ("x", "y")),
))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
def create_global_mesh(mesh_shape, axis_names):
size = prod(mesh_shape)
if len(jax.devices()) < size:
raise unittest.SkipTest(f"Test requires {size} local devices")
mesh_devices = np.array(jax.devices()[:size]).reshape(mesh_shape)
global_mesh = Mesh(mesh_devices, axis_names)
return global_mesh
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
@jtu.with_mesh([('x', 1)])
def testDeviceBufferAval(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
def f(x):
return x
shape = (2, 2)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x)
expected = x
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 1)
self.assertAllClose(
actual.device_buffers[0].to_py(), expected, check_dtypes=False)
# Repro for a bug on device_buffer aval
_ = repr(actual.device_buffers)
@jtu.with_mesh([('x', 2)])
def testBasic1D(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testBasic2D(self):
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), split1,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), split1,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testTwoMeshAxisSharding(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
actual = f(x, x + 1)
expected = x @ (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testBufferDonation(self):
@partial(pjit,
in_axis_resources=P('x'),
out_axis_resources=P('x'),
donate_argnums=0)
def f(x, y):
return x + y
shard = pjit(lambda x: x, in_axis_resources=P('x'),
out_axis_resources=P('x'))
x = shard(jnp.ones((2, 5)) * 4)
y = shard(jnp.ones((2, 5)) * 2)
expected = x + y
self.assertAllClose(f(x, y), expected)
self.assertNotDeleted(y)
self.assertDeleted(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraint(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
y = x + 1
y = with_sharding_constraint(y, P('x', 'y'))
return y * 2
shape = (8, 8)
x = np.arange(prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
hlo = jax.xla_computation(f)(np.ones(shape))
# Annotation from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraintPyTree(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
x = x.copy()
x[0]["a"] *= 2
return x
shape = (8, 8)
v = np.arange(prod(shape)).reshape(shape)
x = [{"a": v, "b": v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]["a"] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]["a"].device_buffers, 2)
hlo = jax.xla_computation(f)(x)
# Annotations from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
def testCaching(self):
def f(x):
assert should_be_tracing
return jnp.sin(x) * 2
x = np.arange(16).reshape(4, 4)
devices = np.array(list(jax.local_devices())[:4])
if devices.size < 4:
raise unittest.SkipTest("Test requires 4 devices")
devices = devices.reshape((2, 2))
with mesh(devices, ('x', 'y')):
should_be_tracing = True
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
# Re-create the mesh to make sure that has no influence on caching
with mesh(devices, ('x', 'y')):
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testNested(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
x = jnp.arange(16).reshape((4, 4))
y = g(x)
self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
self.assertTrue(hasattr(y, "sharding_spec"))
@check_1d_2d_mesh(set_mesh=True)
def testAutodiff(self, mesh, resources):
if len(mesh) != 2: return
assert resources == ('x', 'y')
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum(1) * h.sum(),
in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
order=2)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testEvalJaxpr(self):
x, y = jnp.arange(4), jnp.arange(5)
f = pjit(lambda x, y: x.sum() + jnp.sin(y),
in_axis_resources=(P('x'), P('y')),
out_axis_resources=P('y'))
f_jaxpr = jax.make_jaxpr(f)(x, y)
f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
r, = f_eval(x, y)
self.assertAllClose(r, x.sum() + jnp.sin(y))
@jtu.with_mesh([('x', 2)])
def testNonArrayArg(self):
self.assertEqual(pjit(lambda x: x + 2,
in_axis_resources=None,
out_axis_resources=None)(1), 3)
@jtu.with_mesh([('x', 2)])
def testNonHashableAxisResources(self):
x = jnp.arange(4)
y = pjit(lambda x: {'b': x['a'] + 2},
in_axis_resources=({'a': P('x')},),
out_axis_resources={'b': P('x')})({'a': x})
self.assertAllClose(y, {'b': x + 2})
@jtu.with_mesh([('x', 2)])
def testGradOfConstraint(self):
# Make sure that we can compute grads through sharding constraints
h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
f = pjit(lambda x: jax.grad(h)(x),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(8, dtype=jnp.float32)
self.assertAllClose(f(x), jnp.cos(x))
@jtu.with_mesh([('x', 2)])
def testNoopPartitionSpecs(self):
noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
x = jnp.arange(8).reshape((2, 2, 2))
for spec in noops:
y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
self.assertAllClose(y, x * 2)
@jtu.with_mesh([('x', 2)])
def testVmapModifiesAxisResources(self):
h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
eqn = jaxpr.eqns[0]
self.assertIs(eqn.primitive, pjit_p)
x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
self.assertEqual(x_sync, SpecSync.IN_SYNC)
self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
self.assertEqual(y_sync, SpecSync.IN_SYNC)
self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2)])
def testVMap(self):
f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
self.assertAllClose(z, x + y)
self.assertAllClose(w, x)
self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraint(self):
f = pjit(lambda x: with_sharding_constraint(x, P('x')),
in_axis_resources=P(), out_axis_resources=P('x'))
x = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
self.assertEqual(constraint_eqn.params['axis_resources'].partitions, ((), ('x',)))
self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingInXMap(self):
h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
axis_resources={'i': 'y'})
x = jnp.arange(16).reshape((4, 4))
rule = xla._translations[pjit_p]
test_rule_called = False
def _test_rule(*args, **kwargs):
nonlocal test_rule_called
test_rule_called = True
in_axis_resources = kwargs['in_axis_resources']
self.assertEqual(len(in_axis_resources), 1)
self.assertIn(('y',), in_axis_resources[0].partitions)
return rule(*args, **kwargs)
try:
xla._translations[pjit_p] = _test_rule
f(x)
self.assertTrue(test_rule_called)
finally:
xla._translations[pjit_p] = rule
@jtu.with_mesh([('x', 2)])
def testLowerWithDuckTyping(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
# Make sure this doesn't crash
pjit(lambda x: x + 4,
in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
@jtu.with_mesh([('x', 2)])
def testLowerDonateArgnumsAvailable(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
def f(*args):
x, *_ = args
return x
f_low = pjit(f, donate_argnums=(0,),
in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
f_com = f_low.compile()
self.assertTrue(f_low.donate_argnums == f_com.donate_argnums == (0,))
def testInfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f_for_jit(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(z,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(w,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
return x + y + z + w
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = x * 2.
z = x * 3.
w = x * 4.
# Transfer data to infeed before executing the function. For GPUs, the
# execution of the compiled function is blocking, so transferring data
# to infeed before executing ensures that the execution does not deadlock
# waiting for the infeed data.
logging.info('Transferring to infeed for the jit call')
d = devices[0]
d.transfer_to_infeed((y,))
d.transfer_to_infeed((z,))
d.transfer_to_infeed((w,))
# JIT
logging.info('Making jit call')
res0 = jax.jit(f_for_jit)(x)
self.assertAllClose(res0, x + y + z + w, check_dtypes=True)
# PJIT
def f_for_pjit(x):
token = lax.create_token(x)
# A replicated infeed
(y,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(None,))
# An infeed sharded on first axis
(z,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(nr_devices, 1),))
# An infeed sharded on second axis
(w,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(1, nr_devices),))
return x + y + z + w
logging.info('Transferring to infeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array to all devices for replicated.
d.transfer_to_infeed((y,))
# For sharded infeed, transfer only the needed slices to each device.
d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :],))
d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))
with mesh(devices, ['d']):
logging.info('Making pjit call')
res = pjit(
f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
x)
self.assertAllClose(res0, res, check_dtypes=True)
def testOutfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f(x):
token = lax.create_token(x)
token = lax.outfeed(token, x, partitions=(None,))
token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
return x
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
def dispatch():
with mesh(devices, ['d']):
logging.info('Making pjit call')
pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
execution = threading.Thread(target=dispatch)
execution.start()
def check_outfeed(d, x):
y, = d.transfer_from_outfeed(
xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
self.assertAllClose(x, y, check_dtypes=True)
logging.info('Transferring from outfeed for the pjit call')
for didx, d in enumerate(devices):
# Transfer the whole array from all devices for replicated.
check_outfeed(d, x)
# For sharded outfeed, the results are sliced.
check_outfeed(d, x[3 * didx:3 * didx + 3, :])
check_outfeed(d, x[:, 5 * didx:5 * didx + 5])
execution.join()
@jtu.with_mesh([('x', 2)])
def testWithCustomPRNGKey(self):
if not config.jax_enable_custom_prng:
raise unittest.SkipTest("test requires jax_enable_custom_prng")
key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
# Make sure this doesn't crash
pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompile(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
expected = x @ (x + 1)
exe = f.lower(x, x + 1).compile()
actual = exe(x, x + 1)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileWithKwargs(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y, **kwargs):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
NotImplementedError,
"function was compiled by a transformation that does not support "
"keyword arguments, but called with keyword arguments: a, b",
lambda: exe(x, x + 1, a=1, b=2))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileInTreeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
TypeError, "function compiled for .*, called with .*",
lambda: exe([x], [x + 1]))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileArgTypeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
exe = f.lower(x_f32, x_f32).compile()
self.assertRaisesRegex(
TypeError,
"Computation compiled for input types:\n.*float32.*\n"
"called with:\n.*int32.*",
lambda: exe(x_i32, x_i32))
class GDAPjitTest(jtu.JaxTestCase):
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_single_output(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x @ x.T
expected_matrix_mul = input_data @ input_data.T
out = f(gda_obj)
self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
self.assertEqual(out.shape, (8, 8))
self.assertEqual(out.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out._global_mesh.shape, {'x': 4, 'y': 2})
for s in out.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
with self.assertRaisesRegex(
ValueError, ('For a non-GDA input, the corresponding resource in '
'in_axis_resources cannot be `pjit.FROM_GDA`.')):
f(input_data)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_multi_input_multi_output(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
mesh_axes1 = P('x', 'y')
gda1 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes1, cb)
mesh_axes2 = P('x')
gda2 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes2, cb)
mesh_axes3 = P(('x', 'y'))
gda3 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes3, cb)
mesh_axes4 = P(None)
gda4 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes4, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(
pjit,
# `FROM_GDA` will be replicated for all the inputs.
in_axis_resources=FROM_GDA,
out_axis_resources=(mesh_axes1, mesh_axes4, mesh_axes2, mesh_axes3))
def f(x, y, z, a):
return x @ x.T, y, z, a
out1, out2, out3, out4 = f(gda1, gda2, gda3, gda4)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertEqual(out1.local_shards[0].index, (slice(0, 2), slice(0, 4)))
self.assertEqual(out1.local_shards[1].index, (slice(0, 2), slice(4, 8)))
self.assertListEqual([s.replica_id for s in out1.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
expected_matrix_mul = input_data @ input_data.T
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 2))
self.assertEqual(out2.local_shards[0].data.shape, (8, 2))
self.assertEqual(out2.local_shards[0].index, (slice(None), slice(None)))
self.assertEqual(out2.local_shards[1].index, (slice(None), slice(None)))
self.assertListEqual([s.replica_id for s in out2.local_shards],
[0, 1, 2, 3, 4, 5, 6, 7])
for s in out2.local_shards:
self.assertArraysEqual(s.data, input_data)
self.assertIsInstance(out3, global_device_array.GlobalDeviceArray)
self.assertEqual(out3.shape, (8, 2))
self.assertEqual(out3.local_shards[0].data.shape, (2, 2))
self.assertEqual(out3.local_shards[0].index, (slice(0, 2), slice(None)))
self.assertEqual(out3.local_shards[1].index, (slice(0, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out3.local_shards],
[0, 1, 0, 1, 0, 1, 0, 1])
for s in out3.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
self.assertIsInstance(out4, global_device_array.GlobalDeviceArray)
self.assertEqual(out4.shape, (8, 2))
self.assertEqual(out4.local_shards[0].data.shape, (1, 2))
self.assertEqual(out4.local_shards[0].index, (slice(0, 1), slice(None)))
self.assertEqual(out4.local_shards[1].index, (slice(1, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out4.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
for s in out4.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_mixed_inputs(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit,
in_axis_resources=(FROM_GDA, P('x', 'y')),
out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
def f(x, y):
return x @ x.T, y @ y.T
expected_matrix_mul = input_data @ input_data.T
out1, out2 = f(gda_obj, input_data)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 8))
self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})
for s in out2.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_non_gda_inputs(self):
input_shape = (8, 2)
input_data = np.arange(prod(input_shape)).reshape(input_shape)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit,
in_axis_resources=(None, P('x', 'y')),
out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
def f(x, y):
return x @ x.T, y @ y.T
expected_matrix_mul = input_data @ input_data.T
out1, out2 = f(input_data, input_data)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out1._global_mesh.shape, {'x': 4, 'y': 2})
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 8))
self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
self.assertDictEqual(out2._global_mesh.shape, {'x': 4, 'y': 2})
for s in out2.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
@jtu.with_mesh([('x', 2), ('y', 2)])
def test_pjit_gda_mesh_mismatch(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = ['x', 'y']
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesRegex(ValueError,
"Pjit's mesh and GDA's mesh should be equal."):
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_wrong_resource_for_gda_input(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = ['x']
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Got an input GDA to pjit with different partitioning than specified "
'in the in_axis_resources argument to pjit. The partitioning must '
'match, or use `jax.experimental.pjit.FROM_GDA` in `in_axis_resources`. '
"Got GDA spec: PartitionSpec('x',) and "
"pjit spec: PartitionSpec('x', 'y') "
'for GDA: GlobalDeviceArray(shape=(8, 2), dtype=float32)'):
@partial(pjit, in_axis_resources=P('x', 'y'), out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_caching(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(input_shape), dtype=np.float32).reshape(input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
input_shape, global_mesh, mesh_axes, cb)
trace_counter = [0]
@partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=P('x', 'y'))
def f(x, y):
trace_counter[0] += 1
return x @ y.T
f(gda_obj, gda_obj)
self.assertListEqual(trace_counter, [1])
f(gda_obj, gda_obj)
self.assertListEqual(trace_counter, [1])
f(input_data, input_data)
self.assertListEqual(trace_counter, [2])
f(gda_obj, input_data)
self.assertListEqual(trace_counter, [3])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_partition_spec_mismatch_semantically_equivalent(self):
global_mesh = create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = [None]
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
with jax._src.config.parallel_functions_output_gda(True):
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
@partial(pjit, in_axis_resources=P(None), out_axis_resources=P(None))
def f(x):
return x
output_gda = f(gda_obj)
# Ensure output_gda._mesh_axes = P() is matched with P(None).
self.assertEqual(output_gda._mesh_axes, ())
# P(None) is in_axis_resources.
f(output_gda)
def test_from_gda_duplicates(self):
global_mesh = create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = ['x', 'y']
input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)
# It's occasionally possible to end up with two FROM_GDA singletons (e.g. if
# pickling in_axis_resources and sending to other processes). Make sure this
# doesn't cause an error, to avoid user confusion.
from_gda_dup = pjit_lib._FromGsdaSingleton()
with mesh(global_mesh.devices, global_mesh.axis_names):
pjit(lambda x: x, in_axis_resources=from_gda_dup, out_axis_resources=None)(
input_gda)
def spec_regex(s):
return str(s).replace(r"(", r"\(").replace(r")", r"\)")
class PJitErrorTest(jtu.JaxTestCase):
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleArgs(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleOuts(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleConstraint(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources,)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r".*implies that the size of "
r"its dimension 0 should be divisible by " + mesh_size +
r", but it is equal to 3"):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesArgs(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesOuts(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesConstraint(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r", but resource axis "
r"x is undefined."):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgs(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowOuts(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 0")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowConstraint(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of with_sharding_constraint arguments " +
r"was given.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedInResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single in_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedOutResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single out_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2)])
def testInputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testOutputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit output has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testConstraintShardsXMapAxis(self):
spec = P('x')
f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"with_sharding_constraint input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testCatchesInnerXMapErrors(self):
f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
axis_resources={'i': 'x', 'j': 'x'}),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(4)
with self.assertRaises(JAXTypeError):
f(x, x)
def testEmptyMesh(self):
error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
r"at the call site?")
with self.assertRaisesRegex(RuntimeError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))
@jtu.with_mesh([('x', 2)])
def testAxisResourcesMismatch(self):
x = jnp.ones([])
p = [None, None, None]
pjit(lambda x: x, (p,), p)([x, x, x]) # OK
error = re.escape(
r"pjit in_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification (None, None, None) for value "
r"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are "
r"non-trivial pytrees should always be wrapped in a tuple representing "
r"the argument list.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x, y: x, p, p)(x, x) # Error, but make sure we hint at tupling
# TODO(apaszke): Disable implicit list casts and enable this
# error = re.escape(
# r"pjit in_axis_resources specification must be a tree prefix of the "
# r"corresponding value, got specification (None, None, None) for value "
# r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
# r"are non-trivial pytrees should always be wrapped in a tuple representing "
# r"the argument list. In particular, you're passing in a single argument "
# r"which means that pjit in_axis_resources might need to be wrapped in a "
# r"singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple
error = re.escape(
r"pjit out_axis_resources specification must be a tree prefix of the "
r"corresponding value, got specification [[None, None, None], None] for "
r"value tree PyTreeDef([*, *, *]).")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message
@jtu.with_mesh([('x', 2)])
def testNestedDifferentResources(self):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def f(x):
with mesh(np.array([jax.local_devices()[0]]), ('x')):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def h(x):
return x
return h(x)
xshape = (2, 5, 6)
x = jnp.arange(np.prod(xshape)).reshape(xshape)
with self.assertRaisesRegex(RuntimeError,
"Changing the physical mesh is not allowed.*"):
f(x)
class UtilTest(jtu.JaxTestCase):
def testOpShardingRoundTrip(self):
FakeDevice = namedtuple('FakeDevice', ['id'])
mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))
dims = 5
aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)
def roundtrip(spec):
op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
parsed_spec = pjit_lib.parse_op_sharding(op_sharding, mesh).partitions
self.assertEqual(parsed_spec[:len(spec)], spec)
self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))
special_specs = [P()]
for spec in special_specs:
roundtrip(spec)
rng = self.rng()
for i in range(100):
spec = [()] * dims
for axis in rng.permutation(mesh_axes)[:rng.randint(low=1, high=len(mesh_axes) + 1)]:
spec[rng.choice(dims)] += (axis,)
roundtrip(P(*spec))
@parameterized.named_parameters(
("linear", {'x': 0, 'y': 1, 'z': 2}, (('x',), ('y',), ('z',))),
("combine", {'x': 0, 'y': 0, 'z': 1}, (('x', 'y'), ('z',))),
("skip", {'x': 0, 'y': 0, 'z': 2}, (('x', 'y'), None, ('z',))),
("multi_skip", {'x': 0, 'y': 1, 'z': 3}, (('x',), ('y',), None, ('z',))),
)
def test_array_mapping_to_axis_resources(self, inp, expected_out):
self.assertEqual(pxla.array_mapping_to_axis_resources(inp), expected_out)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
store.py
|
import json
import logging
import os
import threading
import time
import uuid as uuid_builder
from copy import deepcopy
from os import mkdir, path, unlink
from threading import Lock
from changedetectionio.notification import (
default_notification_body,
default_notification_format,
default_notification_title,
)
# Is there an existing library to ensure some data store (JSON etc) is in sync with CRUD methods?
# Open a github issue if you know something :)
# https://stackoverflow.com/questions/6190468/how-to-trigger-function-on-value-change
class ChangeDetectionStore:
lock = Lock()
def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"):
# Should only be active for docker
# logging.basicConfig(filename='/dev/stdout', level=logging.INFO)
self.needs_write = False
self.datastore_path = datastore_path
self.json_store_path = "{}/url-watches.json".format(self.datastore_path)
self.stop_thread = False
self.__data = {
'note': "Hello! If you change this file manually, please be sure to restart your changedetection.io instance!",
'watching': {},
'settings': {
'headers': {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate', # No support for brotli in python requests yet.
'Accept-Language': 'en-GB,en-US;q=0.9,en;'
},
'requests': {
'timeout': 15, # Default 15 seconds
'minutes_between_check': 3 * 60, # Default 3 hours
'workers': 10 # Number of threads, lower is better for slow connections
},
'application': {
'password': False,
'base_url' : None,
'extract_title_as_title': False,
'fetch_backend': 'html_requests',
'global_ignore_text': [], # List of text to ignore when calculating the comparison checksum
'global_subtractive_selectors': [],
'ignore_whitespace': False,
'notification_urls': [], # Apprise URL list
# Custom notification content
'notification_title': default_notification_title,
'notification_body': default_notification_body,
'notification_format': default_notification_format,
}
}
}
# Base definition for all watchers
self.generic_definition = {
'url': None,
'tag': None,
'last_checked': 0,
'last_changed': 0,
'paused': False,
'last_viewed': 0, # history key value of the last viewed via the [diff] link
'newest_history_key': "",
'title': None,
# Re #110, so then if this is set to None, we know to use the default value instead
# Requires setting to None on submit if it's the same as the default
'minutes_between_check': None,
'previous_md5': "",
'uuid': str(uuid_builder.uuid4()),
'headers': {}, # Extra headers to send
'body': None,
'method': 'GET',
'history': {}, # Dict of timestamp and output stripped filename
'ignore_text': [], # List of text to ignore when calculating the comparison checksum
# Custom notification content
'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
'notification_title': default_notification_title,
'notification_body': default_notification_body,
'notification_format': default_notification_format,
'css_filter': "",
'subtractive_selectors': [],
'trigger_text': [], # List of text or regex to wait for until a change is detected
'fetch_backend': None,
'extract_title_as_title': False
}
if path.isfile('changedetectionio/source.txt'):
with open('changedetectionio/source.txt') as f:
# Should be set in the Dockerfile to look for /source.txt; this will give us the git commit #
# So when someone gives us a backup file to examine, we know exactly what code they were running.
self.__data['build_sha'] = f.read()
try:
# @todo retest with ", encoding='utf-8'"
with open(self.json_store_path) as json_file:
from_disk = json.load(json_file)
# @todo isn't there a way to do this dict.update recursively? (see the hedged deep_merge sketch at the end of this file)
# Problem here is if the one on the disk is missing a sub-struct, it won't be present anymore.
if 'watching' in from_disk:
self.__data['watching'].update(from_disk['watching'])
if 'app_guid' in from_disk:
self.__data['app_guid'] = from_disk['app_guid']
if 'settings' in from_disk:
if 'headers' in from_disk['settings']:
self.__data['settings']['headers'].update(from_disk['settings']['headers'])
if 'requests' in from_disk['settings']:
self.__data['settings']['requests'].update(from_disk['settings']['requests'])
if 'application' in from_disk['settings']:
self.__data['settings']['application'].update(from_disk['settings']['application'])
# Reinitialise each `watching` with our generic_definition in the case that we add a new var in the future.
# @todo pretty sure there's a Python way to do this with an abstracted(?) object!
for uuid, watch in self.__data['watching'].items():
_blank = deepcopy(self.generic_definition)
_blank.update(watch)
self.__data['watching'].update({uuid: _blank})
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
print("Watching:", uuid, self.__data['watching'][uuid]['url'])
# First time run, doesn't exist.
except (FileNotFoundError, json.decoder.JSONDecodeError):
if include_default_watches:
print("Creating JSON store at", self.datastore_path)
self.add_watch(url='http://www.quotationspage.com/random.php', tag='test')
self.add_watch(url='https://news.ycombinator.com/', tag='Tech news')
self.add_watch(url='https://www.gov.uk/coronavirus', tag='Covid')
self.add_watch(url='https://changedetection.io/CHANGELOG.txt')
self.__data['version_tag'] = version_tag
# Helper to remove password protection
password_reset_lockfile = "{}/removepassword.lock".format(self.datastore_path)
if path.isfile(password_reset_lockfile):
self.__data['settings']['application']['password'] = False
unlink(password_reset_lockfile)
if not 'app_guid' in self.__data:
import os
import sys
if "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ:
self.__data['app_guid'] = "test-" + str(uuid_builder.uuid4())
else:
self.__data['app_guid'] = str(uuid_builder.uuid4())
# Generate the URL access token for RSS feeds
if not 'rss_access_token' in self.__data['settings']['application']:
import secrets
secret = secrets.token_hex(16)
self.__data['settings']['application']['rss_access_token'] = secret
self.needs_write = True
# Finally start the thread that will manage periodic data saves to JSON
save_data_thread = threading.Thread(target=self.save_datastore)
save_data_thread.start()
# Returns the newest key, but if there's only 1 record, then it's counted as not being new, so return 0.
def get_newest_history_key(self, uuid):
if len(self.__data['watching'][uuid]['history']) == 1:
return 0
dates = list(self.__data['watching'][uuid]['history'].keys())
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
if len(dates):
# always keyed as str
return str(dates[0])
return 0
def set_last_viewed(self, uuid, timestamp):
self.data['watching'][uuid].update({'last_viewed': int(timestamp)})
self.needs_write = True
def update_watch(self, uuid, update_obj):
with self.lock:
# In Python 3.9 we have the |= dict operator, but that will still lose data on nested structures (see the deep_merge sketch at the end of this file)...
for dict_key, d in self.generic_definition.items():
if isinstance(d, dict):
if update_obj is not None and dict_key in update_obj:
self.__data['watching'][uuid][dict_key].update(update_obj[dict_key])
del (update_obj[dict_key])
self.__data['watching'][uuid].update(update_obj)
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
self.needs_write = True
@property
def data(self):
has_unviewed = False
for uuid, v in self.__data['watching'].items():
self.__data['watching'][uuid]['newest_history_key'] = self.get_newest_history_key(uuid)
if int(v['newest_history_key']) <= int(v['last_viewed']):
self.__data['watching'][uuid]['viewed'] = True
else:
self.__data['watching'][uuid]['viewed'] = False
has_unviewed = True
# #106 - Be sure this is None on empty string, False, None, etc
# Default var for fetch_backend
if not self.__data['watching'][uuid]['fetch_backend']:
self.__data['watching'][uuid]['fetch_backend'] = self.__data['settings']['application']['fetch_backend']
# Re #152, return env base_url if not overridden, @todo also prefer the proxy pass url
env_base_url = os.getenv('BASE_URL','')
if not self.__data['settings']['application']['base_url']:
self.__data['settings']['application']['base_url'] = env_base_url.strip('" ')
self.__data['has_unviewed'] = has_unviewed
return self.__data
def get_all_tags(self):
tags = []
for uuid, watch in self.data['watching'].items():
# Support for comma separated list of tags.
for tag in watch['tag'].split(','):
tag = tag.strip()
if tag not in tags:
tags.append(tag)
tags.sort()
return tags
def unlink_history_file(self, path):
try:
unlink(path)
except (FileNotFoundError, IOError):
pass
# Delete a single watch by UUID
def delete(self, uuid):
with self.lock:
if uuid == 'all':
self.__data['watching'] = {}
# GitHub #30 also delete history records
for uuid in self.data['watching']:
for path in self.data['watching'][uuid]['history'].values():
self.unlink_history_file(path)
else:
for path in self.data['watching'][uuid]['history'].values():
self.unlink_history_file(path)
del self.data['watching'][uuid]
self.needs_write = True
# Clone a watch by UUID
def clone(self, uuid):
url = self.data['watching'][uuid]['url']
tag = self.data['watching'][uuid]['tag']
extras = self.data['watching'][uuid]
new_uuid = self.add_watch(url=url, tag=tag, extras=extras)
return new_uuid
def url_exists(self, url):
# Probably there should be a dict...
for watch in self.data['watching'].values():
if watch['url'] == url:
return True
return False
def get_val(self, uuid, val):
# Probably there should be a dict...
return self.data['watching'][uuid].get(val)
# Remove a watch's data but keep the entry (URL etc)
def scrub_watch(self, uuid, limit_timestamp = False):
import hashlib
del_timestamps = []
changes_removed = 0
for timestamp, path in self.data['watching'][uuid]['history'].items():
if not limit_timestamp or (limit_timestamp is not False and int(timestamp) > limit_timestamp):
self.unlink_history_file(path)
del_timestamps.append(timestamp)
changes_removed += 1
if not limit_timestamp:
self.data['watching'][uuid]['last_checked'] = 0
self.data['watching'][uuid]['last_changed'] = 0
self.data['watching'][uuid]['previous_md5'] = ""
for timestamp in del_timestamps:
del self.data['watching'][uuid]['history'][str(timestamp)]
# If there was a limit_timestamp, we need to reset some metadata about the entry
# This has to happen after we remove the others from the list
if limit_timestamp:
newest_key = self.get_newest_history_key(uuid)
if newest_key:
self.data['watching'][uuid]['last_checked'] = int(newest_key)
# @todo should be the original value if it was less than newest key
self.data['watching'][uuid]['last_changed'] = int(newest_key)
try:
with open(self.data['watching'][uuid]['history'][str(newest_key)], "rb") as fp:
content = fp.read()
self.data['watching'][uuid]['previous_md5'] = hashlib.md5(content).hexdigest()
except (FileNotFoundError, IOError):
self.data['watching'][uuid]['previous_md5'] = ""
self.needs_write = True
return changes_removed
def add_watch(self, url, tag="", extras=None):
if extras is None:
extras = {}
with self.lock:
# @todo use a common generic version of this
new_uuid = str(uuid_builder.uuid4())
_blank = deepcopy(self.generic_definition)
_blank.update({
'url': url,
'tag': tag
})
# In case these are copied across, assume it's a reference and deepcopy()
apply_extras = deepcopy(extras)
for k in ['uuid', 'history', 'last_checked', 'last_changed', 'newest_history_key', 'previous_md5', 'viewed']:
if k in apply_extras:
del apply_extras[k]
_blank.update(apply_extras)
self.data['watching'][new_uuid] = _blank
# Get the directory ready
output_path = "{}/{}".format(self.datastore_path, new_uuid)
try:
mkdir(output_path)
except FileExistsError:
print(output_path, "already exists.")
self.sync_to_json()
return new_uuid
# Save some text file to the appropriate path and bump the history
# result_obj from fetch_site_status.run()
def save_history_text(self, watch_uuid, contents):
import uuid
output_path = "{}/{}".format(self.datastore_path, watch_uuid)
# In case the operator deleted it, check and create.
if not os.path.isdir(output_path):
mkdir(output_path)
fname = "{}/{}.stripped.txt".format(output_path, uuid.uuid4())
with open(fname, 'wb') as f:
f.write(contents)
return fname
def sync_to_json(self):
logging.info("Saving JSON..")
try:
data = deepcopy(self.__data)
except RuntimeError as e:
# Try again in 15 seconds
time.sleep(15)
logging.error ("! Data changed when writing to JSON, trying again.. %s", str(e))
self.sync_to_json()
return
else:
try:
# Re #286 - First write to a temp file, then confirm it looks OK and rename it
# This is a fairly basic strategy to deal with the case that the file is corrupted
# or the system ran out of memory, etc.
with open(self.json_store_path+".tmp", 'w') as json_file:
json.dump(data, json_file, indent=4)
os.replace(self.json_store_path+".tmp", self.json_store_path)
except Exception as e:
logging.error("Error writing JSON!! (Main JSON file save was skipped) : %s", str(e))
self.needs_write = False
# Thread runner: this helps with thread/write contention when many operations want to update the JSON,
# by running the writes periodically in a single thread (in CPython, individual dict updates are thread-safe).
def save_datastore(self):
while True:
if self.stop_thread:
print("Shutting down datastore thread")
return
if self.needs_write:
self.sync_to_json()
# Once per minute is enough; checking more often can cause high CPU usage.
# Better here would be something like self.app.config.exit.wait(1), but we can't get to 'app' from here
# (a hedged Event-based sketch appears at the end of this file).
for i in range(30):
time.sleep(2)
if self.stop_thread:
break
# Go through the datastore path and remove any snapshots that are not mentioned in the index
# This usually is not used, but can be handy.
def remove_unused_snapshots(self):
print ("Removing snapshots from datastore that are not in the index..")
index=[]
for uuid in self.data['watching']:
for id in self.data['watching'][uuid]['history']:
index.append(self.data['watching'][uuid]['history'][str(id)])
import pathlib
# Only in the sub-directories
for item in pathlib.Path(self.datastore_path).rglob("*/*txt"):
if str(item) not in index:
print ("Removing",item)
unlink(item)
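# ---------------------------------------------------------------------------
# Illustrative sketches only - not part of ChangeDetectionStore. They expand on
# the @todo comments above; the names `deep_merge`, `_interruptible_save_loop`
# and `stop_event` are assumptions for illustration, not existing APIs of this
# project.
def deep_merge(defaults, overrides):
    """A minimal recursive dict.update: values from `overrides` win, but nested
    dicts are merged instead of replaced, so sub-structures missing from the
    JSON on disk keep their defaults."""
    from copy import deepcopy
    merged = deepcopy(defaults)
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = deepcopy(value)
    return merged
# Hypothetical usage: merged = deep_merge(self.generic_definition, watch_from_disk)

def _interruptible_save_loop(store, stop_event, interval_seconds=60):
    """Hedged sketch of the Event-based wait wished for in save_datastore():
    `stop_event` (a threading.Event) would be set from the shutdown path, which
    wakes the loop immediately instead of waiting out a sleep loop."""
    while not stop_event.wait(timeout=interval_seconds):
        if store.needs_write:
            store.sync_to_json()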
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage
from electrum.util import UserCancelled, InvalidPassword, WalletFileException
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum.plugin import run_hook
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum-KickSoccer, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum-KickSoccer will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error (see the small MRO illustration at the end of this file)
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
BaseWizard.__init__(self, config, plugins)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-KickSoccer - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-KickSoccer wallet'))
self.temp_storage = WalletStorage(path, manual_upgrades=True)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.temp_storage = None
self.next_button.setEnabled(False)
user_needs_to_enter_password = False
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
else:
msg = _('Cannot read file')
self.msg_label.setText(msg)
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum-KickSoccer 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum-KickSoccer communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum-KickSoccer "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
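# on_n() below keeps the maximum of the "required signatures" slider in sync with the current number of cosigners.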
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
widget.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py.
"""
import datetime
import sys
import cStringIO
import time
import thread
import threading
import os
import socket
import signal
import math
import logging
import newcron
import getpass
import gluon.main as main
from gluon.fileutils import read_file, write_file, create_welcome_w2p
from gluon.settings import global_settings
from gluon.shell import run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
datetime.datetime.now().year)
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
%s
%s''' % (ProgramName, ProgramAuthor, ProgramVersion)
if not sys.version[:3] in ['2.5', '2.6', '2.7']:
msg = 'Warning: web2py requires Python 2.5, 2.6 or 2.7 but you are running:\n%s'
msg = msg % sys.version
sys.stderr.write(msg)
logger = logging.getLogger("web2py")
def run_system_tests(options):
"""
Runs unittests for gluon.tests
"""
import subprocess
major_version = sys.version_info[0]
minor_version = sys.version_info[1]
if major_version == 2:
if minor_version in (5, 6):
sys.stderr.write("Python 2.5 or 2.6\n")
ret = subprocess.call(['unit2', '-v', 'gluon.tests'])
elif minor_version in (7,):
call_args = [sys.executable, '-m', 'unittest', '-v', 'gluon.tests']
if options.with_coverage:
try:
import coverage
coverage_config = os.environ.get(
"COVERAGE_PROCESS_START",
os.path.join('gluon', 'tests', 'coverage.ini'))
call_args = ['coverage', 'run', '--rcfile=%s' %
coverage_config,
'-m', 'unittest', '-v', 'gluon.tests']
except:
sys.stderr.write('Coverage was not installed, skipping\n')
sys.stderr.write("Python 2.7\n")
ret = subprocess.call(call_args)
else:
sys.stderr.write("unknown python 2.x version\n")
ret = 256
else:
sys.stderr.write("Only Python 2.x supported.\n")
ret = 256
sys.exit(ret and 1)
class IO(object):
""" """
def __init__(self):
""" """
self.buffer = cStringIO.StringIO()
def write(self, data):
""" """
sys.__stdout__.write(data)
if hasattr(self, 'callback'):
self.callback(data)
else:
self.buffer.write(data)
def get_url(host, path='/', proto='http', port=80):
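# Build a browsable URL; IPv6 hosts are wrapped in brackets and 0.0.0.0 is rewritten to 127.0.0.1 so the link is usable in a browser.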
if ':' in host:
host = '[%s]' % host
else:
host = host.replace('0.0.0.0', '127.0.0.1')
if path.startswith('/'):
path = path[1:]
if proto.endswith(':'):
proto = proto[:-1]
if not port or port == 80:
port = ''
else:
port = ':%s' % port
return '%s://%s%s/%s' % (proto, host, port, path)
def start_browser(url, startup=False):
if startup:
print 'please visit:'
print '\t', url
print 'starting browser...'
try:
import webbrowser
webbrowser.open(url)
except:
print 'warning: unable to detect your browser'
def presentation(root):
""" Draw the splash screen """
import Tkinter
root.withdraw()
dx = root.winfo_screenwidth()
dy = root.winfo_screenheight()
dialog = Tkinter.Toplevel(root, bg='white')
dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150))
dialog.overrideredirect(1)
dialog.focus_force()
canvas = Tkinter.Canvas(dialog,
background='white',
width=500,
height=300)
canvas.pack()
root.update()
logo = os.path.join('extras','icons','splashlogo.gif')
if os.path.exists(logo):
img = Tkinter.PhotoImage(file=logo)
pnl = Tkinter.Label(canvas, image=img, background='white', bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
def add_label(text='Change Me', font_size=12,
foreground='#195866', height=1):
return Tkinter.Label(
master=canvas,
width=250,
height=height,
text=text,
font=('Helvetica', font_size),
anchor=Tkinter.CENTER,
foreground=foreground,
background='white'
)
add_label('Welcome to...').pack(side='top')
add_label(ProgramName, 18, '#FF5C1F', 2).pack()
add_label(ProgramAuthor).pack()
add_label(ProgramVersion).pack()
root.update()
time.sleep(5)
dialog.destroy()
return
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
import Tkinter
import tkMessageBox
root.title('web2py server')
self.root = Tkinter.Toplevel(root)
self.options = options
self.scheduler_processes = {}
self.menu = Tkinter.Menu(self.root)
servermenu = Tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(self.options.folder, 'httpserver.log')
iconphoto = os.path.join('extras','icons','web2py.gif')
if os.path.exists(iconphoto):
img = Tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
#scheduler menu
self.schedmenu = Tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
#start and register schedulers from options
self.update_schedulers(start=True)
helpmenu = Tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = Tkinter.NW
# IP
Tkinter.Label(self.root,
text='Server IP:',
justify=Tkinter.LEFT).grid(row=0,
column=0,
sticky=sticky)
self.ips = {}
self.selected_ip = Tkinter.StringVar()
row = 0
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in options.ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = Tkinter.Radiobutton(
self.root, text='%s (%s)' % (legend, ip),
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=1, sticky=sticky)
if row == 0:
self.ips[ip].select()
row += 1
shift = row
# Port
Tkinter.Label(self.root,
text='Server Port:',
justify=Tkinter.LEFT).grid(row=shift,
column=0,
sticky=sticky)
self.port_number = Tkinter.Entry(self.root)
self.port_number.insert(Tkinter.END, self.options.port)
self.port_number.grid(row=shift, column=1, sticky=sticky)
# Password
Tkinter.Label(self.root,
text='Choose Password:',
justify=Tkinter.LEFT).grid(row=shift + 1,
column=0,
sticky=sticky)
self.password = Tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=1, sticky=sticky)
# Prepare the canvas
self.canvas = Tkinter.Canvas(self.root,
width=300,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=0, columnspan=2)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = Tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=0, columnspan=2)
# Start button
self.button_start = Tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0)
# Stop button
self.button_stop = Tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
apps = []
available_apps = [arq for arq in os.listdir('applications/')]
available_apps = [arq for arq in available_apps
if os.path.exists(
'applications/%s/models/scheduler.py' % arq)]
if start:
#the widget takes care of starting the scheduler
if self.options.scheduler and self.options.with_scheduler:
apps = [app.strip() for app
in self.options.scheduler.split(',')
if app in available_apps]
for app in apps:
self.try_start_scheduler(app)
#reset the menu
self.schedmenu.delete(0, len(available_apps))
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda u = arq: self.try_start_scheduler(u)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda u = arq: self.try_stop_scheduler(u)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
def start_schedulers(self, app):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
code = "from gluon import current;current._scheduler.loop()"
print 'starting scheduler from widget for "%s"...' % app
args = (app, True, True, None, False, code)
logging.getLogger().setLevel(self.options.debuglevel)
p = Process(target=run, args=args)
self.scheduler_processes[app] = p
self.update_schedulers()
print "Currently running %s scheduler processes" % (
len(self.scheduler_processes))
p.start()
print "Processes started"
def try_stop_scheduler(self, app):
if app in self.scheduler_processes:
p = self.scheduler_processes[app]
del self.scheduler_processes[app]
p.terminate()
p.join()
self.update_schedulers()
def try_start_scheduler(self, app):
if app not in self.scheduler_processes:
t = threading.Thread(target=self.start_schedulers, args=(app,))
t.start()
def checkTaskBar(self):
""" Check taskbar status """
if self.tb.status:
if self.tb.status[0] == self.tb.EnumStatus.QUIT:
self.quit()
elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif self.tb.status[0] == self.tb.EnumStatus.STOP:
self.stop()
elif self.tb.status[0] == self.tb.EnumStatus.START:
self.start()
elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
self.stop()
self.start()
del self.tb.status[0]
self.root.after(1000, self.checkTaskBar)
def update(self, text):
""" Update app text """
try:
self.text.configure(state='normal')
self.text.insert('end', text)
self.text.configure(state='disabled')
except:
pass # ## this should only happen in case app is destroyed
def connect_pages(self):
""" Connect pages """
#reset the menu
available_apps = [arq for arq in os.listdir('applications/')
if os.path.exists(
'applications/%s/__init__.py' % arq)]
self.pagesmenu.delete(0, len(available_apps))
for arq in available_apps:
url = self.url + arq
self.pagesmenu.add_command(
label=url, command=lambda u=url: start_browser(u))
def quit(self, justHide=False):
""" Finish the program execution """
if justHide:
self.root.withdraw()
else:
try:
scheds = self.scheduler_processes.keys()
for t in scheds:
self.try_stop_scheduler(t)
except:
pass
try:
newcron.stopcron()
except:
pass
try:
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
def error(self, message):
""" Show error message """
import tkMessageBox
tkMessageBox.showerror('web2py start server', message)
def start(self):
""" Start web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except:
return self.error('invalid port number')
# Check for non default value for ssl inputs
if (len(self.options.ssl_certificate) > 0 or
len(self.options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
thread.start_new_thread(self.server.start, ())
except Exception, e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
thread.start_new_thread(
start_browser, (get_url(ip, proto=proto, port=port), True))
self.password.configure(state='readonly')
[ip.configure(state='disabled') for ip in self.ips.values()]
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
def stop(self):
""" Stop web2py server """
self.button_start.configure(state='normal')
self.button_stop.configure(state='disabled')
self.password.configure(state='normal')
[ip.configure(state='normal') for ip in self.ips.values()]
self.port_number.configure(state='normal')
self.server.stop()
if self.tb:
self.tb.SetServerStopped()
def update_canvas(self):
""" Update canvas """
try:
t1 = os.path.getsize('httpserver.log')
except:
self.canvas.after(1000, self.update_canvas)
return
try:
fp = open('httpserver.log', 'r')
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
fp.close()
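# Scroll the activity graph one step to the left and append a new point; the more log lines written since the last tick, the closer to the top of the canvas the point is drawn.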
value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
self.p0 = value
for i in xrange(len(self.p0) - 1):
c = self.canvas.coords(self.q0[i])
self.canvas.coords(self.q0[i],
(c[0],
self.p0[i],
c[2],
self.p0[i + 1]))
self.t0 = t1
except BaseException:
self.t0 = time.time()
self.t0 = t1
self.p0 = [100] * 300
self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(len(self.p0) - 1)]
self.canvas.after(1000, self.update_canvas)
def console():
""" Defines the behavior of the console web2py execution """
import optparse
import textwrap
usage = "python web2py.py"
description = """\
web2py Web Framework startup script.
ATTENTION: unless a password is specified (-a 'passwd') web2py will
attempt to run a GUI. In this case command line options are ignored."""
description = textwrap.dedent(description)
parser = optparse.OptionParser(
usage, None, optparse.Option, ProgramVersion)
parser.description = description
msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
'Note: This value is ignored when using the \'interfaces\' option.')
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help=msg)
parser.add_option('-p',
'--port',
default='8000',
dest='port',
type='int',
help='port of server (8000)')
msg = ('password to be used for administration '
'(use -a "<recycle>" to reuse the last password))')
parser.add_option('-a',
'--password',
default='<ask>',
dest='password',
help=msg)
parser.add_option('-c',
'--ssl_certificate',
default='',
dest='ssl_certificate',
help='file that contains ssl certificate')
parser.add_option('-k',
'--ssl_private_key',
default='',
dest='ssl_private_key',
help='file that contains ssl private key')
msg = ('Use this file containing the CA certificate to validate X509 '
'certificates from clients')
parser.add_option('--ca-cert',
action='store',
dest='ssl_ca_certificate',
default=None,
help=msg)
parser.add_option('-d',
'--pid_filename',
default='httpserver.pid',
dest='pid_filename',
help='file to store the pid of the server')
parser.add_option('-l',
'--log_filename',
default='httpserver.log',
dest='log_filename',
help='file to log connections')
parser.add_option('-n',
'--numthreads',
default=None,
type='int',
dest='numthreads',
help='number of threads (deprecated)')
parser.add_option('--minthreads',
default=None,
type='int',
dest='minthreads',
help='minimum number of server threads')
parser.add_option('--maxthreads',
default=None,
type='int',
dest='maxthreads',
help='maximum number of server threads')
parser.add_option('-s',
'--server_name',
default=socket.gethostname(),
dest='server_name',
help='server name for the web server')
msg = 'max number of queued requests when server unavailable'
parser.add_option('-q',
'--request_queue_size',
default='5',
type='int',
dest='request_queue_size',
help=msg)
parser.add_option('-o',
'--timeout',
default='10',
type='int',
dest='timeout',
help='timeout for individual request (10 seconds)')
parser.add_option('-z',
'--shutdown_timeout',
default='5',
type='int',
dest='shutdown_timeout',
help='timeout on shutdown of server (5 seconds)')
parser.add_option('--socket-timeout',
default=5,
type='int',
dest='socket_timeout',
help='timeout for socket (5 seconds)')
parser.add_option('-f',
'--folder',
default=os.getcwd(),
dest='folder',
help='folder from which to run web2py')
parser.add_option('-v',
'--verbose',
action='store_true',
dest='verbose',
default=False,
help='increase --test verbosity')
parser.add_option('-Q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='disable all output')
msg = ('set debug output level (0-100, 0 means all, 100 means none; '
'default is 30)')
parser.add_option('-D',
'--debug',
dest='debuglevel',
default=30,
type='int',
help=msg)
msg = ('run web2py in interactive shell or IPython (if installed) with '
'specified appname (if app does not exist it will be created). '
'APPNAME like a/c/f (c,f optional)')
parser.add_option('-S',
'--shell',
dest='shell',
metavar='APPNAME',
help=msg)
msg = ('run web2py in interactive shell or bpython (if installed) with '
'specified appname (if app does not exist it will be created).\n'
'Use combined with --shell')
parser.add_option('-B',
'--bpython',
action='store_true',
default=False,
dest='bpython',
help=msg)
msg = 'only use plain python shell; should be used with --shell option'
parser.add_option('-P',
'--plain',
action='store_true',
default=False,
dest='plain',
help=msg)
msg = ('auto import model files; default is False; should be used '
'with --shell option')
parser.add_option('-M',
'--import_models',
action='store_true',
default=False,
dest='import_models',
help=msg)
msg = ('run PYTHON_FILE in web2py environment; '
'should be used with --shell option')
parser.add_option('-R',
'--run',
dest='run',
metavar='PYTHON_FILE',
default='',
help=msg)
msg = ('run scheduled tasks for the specified apps: expects a list of '
'app names as -K app1,app2,app3 '
'or a list of app:groups as -K app1:group1:group2,app2:group1 '
'to override specific group_names. (only strings, no spaces '
'allowed). Requires a scheduler defined in the models')
parser.add_option('-K',
'--scheduler',
dest='scheduler',
default=None,
help=msg)
msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
parser.add_option('-X',
'--with-scheduler',
action='store_true',
default=False,
dest='with_scheduler',
help=msg)
msg = ('run doctests in web2py environment; '
'TEST_PATH like a/c/f (c,f optional)')
parser.add_option('-T',
'--test',
dest='test',
metavar='TEST_PATH',
default=None,
help=msg)
parser.add_option('-W',
'--winservice',
dest='winservice',
default='',
help='-W install|start|stop as Windows service')
msg = 'trigger a cron run manually; usually invoked from a system crontab'
parser.add_option('-C',
'--cron',
action='store_true',
dest='extcron',
default=False,
help=msg)
msg = 'triggers the use of softcron'
parser.add_option('--softcron',
action='store_true',
dest='softcron',
default=False,
help=msg)
parser.add_option('-Y',
'--run-cron',
action='store_true',
dest='runcron',
default=False,
help='start the background cron process')
parser.add_option('-J',
'--cronjob',
action='store_true',
dest='cronjob',
default=False,
help='identify cron-initiated command')
parser.add_option('-L',
'--config',
dest='config',
default='',
help='config file')
parser.add_option('-F',
'--profiler',
dest='profiler_dir',
default=None,
help='profiler dir')
parser.add_option('-t',
'--taskbar',
action='store_true',
dest='taskbar',
default=False,
help='use web2py gui and run in taskbar (system tray)')
parser.add_option('',
'--nogui',
action='store_true',
default=False,
dest='nogui',
help='text-only, no GUI')
msg = ('should be followed by a list of arguments to be passed to script, '
'to be used with -S, -A must be the last option')
parser.add_option('-A',
'--args',
action='store',
dest='args',
default=None,
help=msg)
parser.add_option('--no-banner',
action='store_true',
default=False,
dest='nobanner',
help='Do not print header banner')
msg = ('listen on multiple addresses: '
'"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
'(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
'square [] brackets)')
parser.add_option('--interfaces',
action='store',
dest='interfaces',
default=None,
help=msg)
msg = 'runs web2py tests'
parser.add_option('--run_system_tests',
action='store_true',
dest='run_system_tests',
default=False,
help=msg)
msg = ('adds coverage reporting (needs --run_system_tests), '
'python 2.7 and the coverage module installed. '
'You can alter the default path setting the environmental '
'var "COVERAGE_PROCESS_START". '
'By default it takes gluon/tests/coverage.ini')
parser.add_option('--with_coverage',
action='store_true',
dest='with_coverage',
default=False,
help=msg)
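# Split off everything after -A/--args before parsing so optparse does not consume the target script's own arguments; they are re-attached to options.args below.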
if '-A' in sys.argv:
k = sys.argv.index('-A')
elif '--args' in sys.argv:
k = sys.argv.index('--args')
else:
k = len(sys.argv)
sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
(options, args) = parser.parse_args()
options.args = [options.run] + other_args
global_settings.cmd_options = options
global_settings.cmd_args = args
try:
options.ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
options.ips = []
if options.run_system_tests:
run_system_tests(options)
if options.quiet:
capture = cStringIO.StringIO()
sys.stdout = capture
logger.setLevel(logging.CRITICAL + 1)
else:
logger.setLevel(options.debuglevel)
if options.config[-3:] == '.py':
options.config = options.config[:-3]
if options.cronjob:
global_settings.cronjob = True # tell the world
options.plain = True # cronjobs use a plain shell
options.nobanner = True
options.nogui = True
options.folder = os.path.abspath(options.folder)
# accept --interfaces in the form
# "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
# (no spaces; optional key:cert indicate SSL)
if isinstance(options.interfaces, str):
interfaces = options.interfaces.split(';')
options.interfaces = []
for interface in interfaces:
if interface.startswith('['): # IPv6
ip, if_remainder = interface.split(']', 1)
ip = ip[1:]
if_remainder = if_remainder[1:].split(':')
if_remainder[0] = int(if_remainder[0]) # numeric port
options.interfaces.append(tuple([ip] + if_remainder))
else: # IPv4
interface = interface.split(':')
interface[1] = int(interface[1]) # numeric port
options.interfaces.append(tuple(interface))
# accepts --scheduler in the form
# "app:group1,group2,app2:group1"
scheduler = []
options.scheduler_groups = None
if isinstance(options.scheduler, str):
if ':' in options.scheduler:
for opt in options.scheduler.split(','):
scheduler.append(opt.split(':'))
options.scheduler = ','.join([app[0] for app in scheduler])
options.scheduler_groups = scheduler
if options.numthreads is not None and options.minthreads is None:
options.minthreads = options.numthreads # legacy
create_welcome_w2p()
if not options.cronjob:
# If we have the applications package or if we should upgrade
if not os.path.exists('applications/__init__.py'):
write_file('applications/__init__.py', '')
return options, args
def check_existent_app(options, appname):
if os.path.isdir(os.path.join(options.folder, 'applications', appname)):
return True
def get_code_for_scheduler(app, options):
if len(app) == 1 or app[1] is None:
code = "from gluon import current;current._scheduler.loop()"
else:
code = "from gluon import current;current._scheduler.group_names = ['%s'];"
code += "current._scheduler.loop()"
code = code % ("','".join(app[1:]))
app_ = app[0]
if not check_existent_app(options, app_):
print "Application '%s' doesn't exist, skipping" % app_
return None, None
return app_, code
def start_schedulers(options):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
return
processes = []
apps = [(app.strip(), None) for app in options.scheduler.split(',')]
if options.scheduler_groups:
apps = options.scheduler_groups
code = "from gluon import current;current._scheduler.loop()"
logging.getLogger().setLevel(options.debuglevel)
if len(apps) == 1 and not options.with_scheduler:
app_, code = get_code_for_scheduler(apps[0], options)
if not app_:
return
print 'starting single-scheduler for "%s"...' % app_
run(app_, True, True, None, False, code)
return
for app in apps:
app_, code = get_code_for_scheduler(app, options)
if not app_:
continue
print 'starting scheduler for "%s"...' % app_
args = (app_, True, True, None, False, code)
p = Process(target=run, args=args)
processes.append(p)
print "Currently running %s scheduler processes" % (len(processes))
p.start()
##to avoid bashing the db at the same time
time.sleep(0.7)
print "Processes started"
for p in processes:
try:
p.join()
except (KeyboardInterrupt, SystemExit):
print "Processes stopped"
except:
p.terminate()
p.join()
def start(cron=True):
""" Start server """
# ## get command line arguments
(options, args) = console()
if not options.nobanner:
print ProgramName
print ProgramAuthor
print ProgramVersion
from dal import DRIVERS
if not options.nobanner:
print 'Database drivers available: %s' % ', '.join(DRIVERS)
# ## if -L load options from options.config file
if options.config:
try:
options2 = __import__(options.config, {}, {}, '')
except Exception:
try:
# Jython doesn't like the extra stuff
options2 = __import__(options.config)
except Exception:
print 'Cannot import config file [%s]' % options.config
sys.exit(1)
for key in dir(options2):
if hasattr(options, key):
setattr(options, key, getattr(options2, key))
logfile0 = os.path.join('extras','examples','logging.example.conf')
if not os.path.exists('logging.conf') and os.path.exists(logfile0):
import shutil
sys.stdout.write("Copying logging.conf.example to logging.conf ... ")
shutil.copyfile('logging.example.conf', logfile0)
sys.stdout.write("OK\n")
# ## if -T run doctests (no cron)
if hasattr(options, 'test') and options.test:
test(options.test, verbose=options.verbose)
return
# ## if -S start interactive shell (also no cron)
if options.shell:
if not options.args is None:
sys.argv[:] = options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cronjob=options.cronjob)
return
# ## if -C start cron run (extcron) and exit
# ## -K specifies optional apps list (overloading scheduler)
if options.extcron:
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
if options.scheduler: # -K
apps = [app.strip() for app in options.scheduler.split(
',') if check_existent_app(options, app.strip())]
else:
apps = None
extcron = newcron.extcron(options.folder, apps=apps)
extcron.start()
extcron.join()
return
# ## if -K
if options.scheduler and not options.with_scheduler:
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
# ## if -W install/start/stop web2py as service
if options.winservice:
if os.name == 'nt':
try:
from winservice import register_service_handler, Web2pyService
register_service_handler(
argv=['', options.winservice],
opt_file=options.config,
cls=Web2pyService)
except ImportError:
print 'Error: Missing python module winservice'
sys.exit(1)
else:
print 'Error: Windows services not supported on this platform'
sys.exit(1)
return
# ## if -H cron is enabled in this *process*
# ## if --softcron use softcron
# ## use hardcron in all other cases
if cron and options.runcron and options.softcron:
print 'Using softcron (but this is not very efficient)'
global_settings.web2py_crontype = 'soft'
elif cron and options.runcron:
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder).start()
# ## if no password provided and havetk start Tk interface
# ## or start interface if we want to put in taskbar (system tray)
try:
options.taskbar
except:
options.taskbar = False
if options.taskbar and os.name != 'nt':
print 'Error: taskbar not supported on this platform'
sys.exit(1)
root = None
if not options.nogui:
try:
import Tkinter
havetk = True
except ImportError:
logger.warn(
'GUI not available because Tk library is not installed')
havetk = False
options.nogui = True
if options.password == '<ask>' and havetk or options.taskbar and havetk:
try:
root = Tkinter.Tk()
except:
pass
if root:
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
if not options.quiet:
presentation(root)
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
# ## if no tk and no password, ask for a password
if not root and options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.nobanner:
print 'no password, no admin interface'
# ##-X (if no tk, the widget takes care of it itself)
if not root and options.scheduler and options.with_scheduler:
t = threading.Thread(target=start_schedulers, args=(options,))
t.start()
# ## start server
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
(ip, port) = (options.ip, int(options.port))
else:
first_if = options.interfaces[0]
(ip, port) = first_if[0], first_if[1]
# Check for non default value for ssl inputs
if (len(options.ssl_certificate) > 0) or (len(options.ssl_private_key) > 0):
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.nobanner:
print 'please visit:'
print '\t', url
print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid()
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
f = open(filename, "r")
try:
for i, line in enumerate(f):
if lineno == i + 1:
break
else:
line = None
finally:
f.close()
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.ssl_certificate,
ssl_private_key=options.ssl_private_key,
ssl_ca_certificate=options.ssl_ca_certificate,
min_threads=options.minthreads,
max_threads=options.maxthreads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
try:
t.join()
except:
pass
logging.shutdown()
|
auto_scaler.py
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import time
import logging
import os
import pipes
from subprocess import check_call
from tests.common.impala_cluster import ImpalaCluster
from threading import Event, Thread
IMPALA_HOME = os.environ["IMPALA_HOME"]
class AutoScaler(object):
"""This class implements a simple autoscaling algorithm: if queries queue up for a
configurable duration, a new executor group is started. Likewise, if the number of
concurrently running queries indicates that an executor group can be removed, one is
shut down.
Users of this class can start an auto scaler by calling start() and must call stop()
before exiting (see main() below for an example).
This class only uses the default admission control pool.
"""
DEFAULT_POOL_NAME = "default-pool"
def __init__(self, executor_slots, group_size, start_batch_size=0, max_groups=0,
wait_up_s=0, wait_down_s=0, coordinator_slots=128):
# Number of queries that can run concurrently on each executor
self.executor_slots = executor_slots
self.coordinator_slots = coordinator_slots
# Number of executors per executor group
self.group_size = group_size
# New executor groups will be started in increments of this size
self.start_batch_size = group_size
if start_batch_size > 0:
self.start_batch_size = start_batch_size
# Maximum number of executor groups. We only have 10 TCP ports free on our
# miniclusters and we need one for the dedicated coordinator.
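# Note: under impala-python (Python 2) this division is integer division, so the result is floored.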
self.max_groups = 9 / self.group_size
# max_groups can further bound the maximum number of groups we are going to start,
# but we won't start more than possible.
if max_groups > 0 and max_groups < self.max_groups:
self.max_groups = max_groups
# Number of seconds to wait before scaling up/down
self.scale_wait_up_s = 5
if wait_up_s > 0:
self.scale_wait_up_s = wait_up_s
self.scale_wait_down_s = 5
if wait_down_s > 0:
self.scale_wait_down_s = wait_down_s
self.groups = []
self.num_groups = 0
# Stopwatches to track how long the conditions for scaling up/down have been met.
self.scale_up_sw = time.time()
self.scale_down_sw = time.time()
self.loop_thread = None
# Event to signal that the control loop should exit
self.stop_ev = Event()
def get_cluster(self):
return ImpalaCluster.get_e2e_test_cluster()
def get_coordinator(self):
cluster = self.get_cluster()
assert len(cluster.impalads) > 0
return cluster.get_first_impalad()
def get_service(self):
return self.get_coordinator().service
def get_client(self):
return self.get_coordinator().service.create_hs2_client()
def group_name(self, idx):
# By convention, group names must start with their associated resource pool name
# followed by a "-".
return "%s-group-%s" % (self.DEFAULT_POOL_NAME, idx)
def start_base_cluster(self):
"""Starts the base cluster consisting of an exclusive coordinator, catalog, and
statestore. Does not add any executors."""
logging.info("Starting base cluster (coordinator, catalog, statestore)")
cluster_args = ["--impalad_args=-executor_groups=coordinator"]
self._start_impala_cluster(cluster_args, cluster_size=1,
executor_slots=self.coordinator_slots,
expected_num_executors=0, add_executors=False)
logging.info("Done, number of running executor groups: %s" % self.num_groups)
def start_group(self):
"""Starts an executor group. The name of the group is automatically determined based
on the current number of total executor groups. Executors in the group will be started
in batches."""
self.num_groups += 1
name = self.group_name(self.num_groups)
desc = "%s:%s" % (name, self.group_size)
logging.info("Starting executor group %s with %s members" % (name, self.group_size))
cluster_args = ["--impalad_args=-executor_groups=%s" % desc]
batch_size = self.start_batch_size
num_started = 0
num_expected = (self.num_groups - 1) * self.group_size
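# Executors already expected from previously started groups; each batch raises this total before waiting for the new daemons to register.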
while (num_started < self.group_size):
to_start = min(batch_size, self.group_size - num_started)
num_expected += to_start
if to_start == 1:
start_msg = "Starting executor %s" % (num_started + 1)
else:
start_msg = "Starting executors %s-%s" % (num_started + 1,
num_started + to_start)
logging.info(start_msg)
self._start_impala_cluster(cluster_args, cluster_size=to_start,
executor_slots=self.executor_slots,
expected_num_executors=num_expected, add_executors=True)
num_started += to_start
logging.info("Done, number of running executor groups: %s" % self.num_groups)
def stop_group(self):
"""Stops the executor group that was added last."""
name = self.group_name(self.num_groups)
group_hosts = self.get_groups()[name]
logging.info("Stopping executor group %s" % name)
for host in group_hosts:
logging.debug("Stopping host %s" % host)
query = ":shutdown('%s');" % host
self.execute(query)
self.wait_for_group_gone(name)
self.num_groups -= 1
logging.info("Done, number of running executor groups: %s" % self.num_groups)
def wait_for_group_gone(self, group_name, timeout=120):
"""Waits until all executors in group 'group_name' have unregistered themselves from
the coordinator's cluster membership view."""
end = time.time() + timeout
while time.time() < end:
groups = self.get_groups()
if group_name not in groups:
return
time.sleep(0.5)
assert False, "Timeout waiting for group %s to shut down" % group_name
def get_groups(self):
return self.get_service().get_executor_groups()
def execute(self, query):
return self.get_client().execute(query)
def get_num_queued_queries(self):
"""Returns the number of queries currently queued in the default pool on the
coordinator."""
return self.get_service().get_num_queued_queries(pool_name=self.DEFAULT_POOL_NAME)
def get_num_running_queries(self):
"""Returns the number of queries currently queued in the default pool on the
coordinator."""
return self.get_service().get_num_running_queries(self.DEFAULT_POOL_NAME)
def loop(self):
"""Controls whether new executor groups need to be started or existing ones need to be
stopped, based on the number of queries that are currently queued and running.
"""
while not self.stop_ev.is_set():
now = time.time()
num_queued = self.get_num_queued_queries()
num_running = self.get_num_running_queries()
capacity = self.executor_slots * self.num_groups
logging.debug("queued: %s, running: %s, capacity: %s" % (num_queued, num_running,
capacity))
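# scale_up_sw marks the last time the queue was empty; once queries have been queued continuously for longer than scale_wait_up_s, a new group is started (up to max_groups).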
if num_queued == 0:
self.scale_up_sw = now
scale_up = self.scale_up_sw < now - self.scale_wait_up_s
if scale_up and self.num_groups < self.max_groups:
self.start_group()
self.scale_up_sw = time.time()
self.scale_down_sw = self.scale_up_sw
continue
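# scale_down_sw marks the last time less than one group's worth of capacity (executor_slots) was idle; once at least a full group's capacity has stayed idle for scale_wait_down_s, the newest group is stopped.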
surplus = capacity - num_running
if surplus < self.executor_slots:
self.scale_down_sw = now
if self.scale_down_sw < now - self.scale_wait_down_s:
self.stop_group()
self.scale_up_sw = time.time()
self.scale_down_sw = self.scale_up_sw
continue
time.sleep(1)
def start(self):
"""Starts a base cluster with coordinator and statestore and the control loop to start
and stop additional executor groups."""
self.start_base_cluster()
assert self.loop_thread is None
self.loop_thread = Thread(target=self.loop)
self.loop_thread.start()
def stop(self):
"""Stops the AutoScaler and its cluster."""
if self.stop_ev.is_set():
return
self.stop_ev.set()
if self.loop_thread:
self.loop_thread.join()
self.loop_thread = None
self._kill_whole_cluster()
def _start_impala_cluster(self, options, cluster_size, executor_slots,
expected_num_executors, add_executors):
"""Starts an Impala cluster and waits for all impalads to come online.
If 'add_executors' is True, new executors will be added to the cluster and the
existing daemons will not be restarted. In that case 'cluster_size' must specify the
number of nodes that will be added and 'expected_num_executors' must be the total
expected number of executors after the additional ones have started.
If 'add_executors' is false, 'cluster_size' must be 1 and a single exclusive
coordinator will be started (together with catalog and statestore).
"""
assert cluster_size > 0, "cluster_size cannot be 0"
impala_log_dir = os.getenv("LOG_DIR", "/tmp/")
cmd = [os.path.join(IMPALA_HOME, "bin/start-impala-cluster.py"),
"--cluster_size=%d" % cluster_size,
"--log_dir=%s" % impala_log_dir,
"--log_level=1"]
if add_executors:
cmd.append("--add_executors")
else:
assert expected_num_executors == 0
assert cluster_size == 1
cmd.append("--use_exclusive_coordinators")
impalad_args = [
"-vmodule=admission-controller=3,cluster-membership-mgr=3",
"-admission_control_slots=%s" % executor_slots,
"-shutdown_grace_period_s=2"]
options += ["--impalad_args=%s" % a for a in impalad_args]
logging.debug("Starting cluster with command: %s" %
" ".join(pipes.quote(arg) for arg in cmd + options))
log_debug = logging.getLogger().getEffectiveLevel() == logging.DEBUG
log_file = None
if not log_debug:
log_file = open("/dev/null", "w")
check_call(cmd + options, close_fds=True, stdout=log_file, stderr=log_file)
# The number of statestore subscribers is
# cluster_size (# of impalad) + 1 (for catalogd).
if expected_num_executors > 0:
expected_subscribers = expected_num_executors + 2
expected_backends = expected_num_executors + 1
else:
expected_subscribers = cluster_size + 1
expected_backends = 1
cluster = self.get_cluster()
statestored = cluster.statestored
if statestored is None:
raise Exception("statestored was not found")
logging.debug("Waiting for %s subscribers to come online" % expected_subscribers)
statestored.service.wait_for_live_subscribers(expected_subscribers, timeout=60)
for impalad in cluster.impalads:
logging.debug("Waiting for %s executors to come online" % expected_backends)
impalad.service.wait_for_num_known_live_backends(expected_backends, timeout=60)
def _kill_whole_cluster(self):
"""Terminates the whole cluster, i.e. all impalads, catalogd, and statestored."""
logging.info("terminating cluster")
check_call([os.path.join(IMPALA_HOME, "bin/start-impala-cluster.py"), "--kill_only"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--executor_slots", help="Concurrent queries per executor "
"group", type=int, default=3)
parser.add_argument("-g", "--group_size", help="Number of executors per group",
type=int, default=2)
parser.add_argument("-b", "--batch_size", help="Start executors of a group "
"in batches instead of all at once", type=int, default=0)
parser.add_argument("-m", "--max_groups", help="Maximum number of groups to start",
type=int, default=0)
parser.add_argument("-d", "--wait_down", help="Time to wait before scaling down (s)",
type=int, default=5)
parser.add_argument("-u", "--wait_up", help="Time to wait before scaling up (s)",
type=int, default=5)
parser.add_argument("-v", "--verbose", help="Verbose logging", action="store_true")
args = parser.parse_args()
# Restrict some logging for command line usage
logging.getLogger("impala_cluster").setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("impala.hiveserver2").setLevel(logging.INFO)
else:
logging.basicConfig(level=logging.INFO)
# Also restrict other modules' debug output
logging.getLogger("impala_connection").setLevel(logging.WARNING)
logging.getLogger("impala_service").setLevel(logging.WARNING)
logging.getLogger("impala.hiveserver2").setLevel(logging.WARNING)
a = AutoScaler(executor_slots=args.executor_slots, group_size=args.group_size,
start_batch_size=args.batch_size, max_groups=args.max_groups,
wait_up_s=args.wait_up, wait_down_s=args.wait_down)
a.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
logging.debug("Caught KeyboardInterrupt, stopping autoscaler")
a.stop()
if __name__ == "__main__":
main()
|
02_grundlagen_multithreading.py
|
# 02_grundlagen_multithreading.py
import os, re, time, datetime, threading
max_text_length=70
max_text_delta=24
def output(title, string):
print('╔'+''.center(max_text_length+8, '═')+'╗')
print('║ '+title.center(max_text_length+7).upper()+'║')
print('╠'+''.center(max_text_length+8, '═')+'╣')
string=string+' '*max_text_length
search_pattern=re.compile(r'\w+.{'+str(max_text_length-max_text_delta-7)+r','+str(max_text_length-7)+r'}[ |.|,|\n|>|\W]', re.DOTALL)
results=search_pattern.findall(string)
for line in results:
print('║ '+line.strip()+'║'.rjust(max_text_length+8-len(line.strip())))
print('╚'+''.center(max_text_length+8, '═')+'╝')
input()
def say_hello():
time.sleep(2)
print('This print statement was produced by multithreading.')
output('What is multithreading?', 'All scripts used so far were single threaded, since they only ever processed one line after the other. Python reads the code with just one finger, so to speak. With multithreading you can add more fingers, meaning parts of the code can run simultaneously and/or staggered. The threading module is required for this.')
thread_object=threading.Thread(target=say_hello)
thread_object.start()
output('threading.Thread()', 'With threading.Thread a function can be run in a second thread. Example with the existing function say_hello(): threading.Thread(target=say_hello) runs the function say_hello() alongside the code that is already running.')
output('Functions with arguments', 'With multithreading you can also target functions that require arguments. Example with the print function: threading.Thread(target=print, args=["Cats", "Dogs", "Frogs",], kwargs={"sep": " & "})')
threading.Thread(target=print, args=["Cats", "Dogs", "Frogs",], kwargs={'sep': ' & '}).start()
time.sleep(0.5)
output('Reading/Writing', 'The problem with multithreading arises when several threads try to read or write the same variable at once. This can lead to conflicts that are hard to detect, so make sure that the different threads do not try to access the same variable at the same time.')
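# Not part of the original lesson: a minimal sketch showing how threading.Lock (already
# available via the threading import above) can serialize access to a shared variable and
# avoid the read/write conflicts described in the previous output.
counter = 0
counter_lock = threading.Lock()
def increment_counter():
    global counter
    for _ in range(100000):
        with counter_lock:  # only one thread at a time may update the shared counter
            counter += 1
threads = [threading.Thread(target=increment_counter) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print('Counter protected by a lock:', counter)  # always 400000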
|
test_main_loop.py
|
import os
import signal
import time
from itertools import count
from multiprocessing import Process
from fuel.datasets import IterableDataset
from mock import MagicMock, ANY
from numpy.testing import assert_raises
from six.moves import cPickle
from blocks.main_loop import MainLoop
from blocks.extensions import TrainingExtension, FinishAfter, Printing
from blocks.utils import unpack
from blocks.config import config
from blocks.utils.testing import MockAlgorithm, MockMainLoop
class WriteBatchExtension(TrainingExtension):
"""Writes data saved by MockAlgorithm to the log."""
def after_batch(self, _):
self.main_loop.log.current_row['batch'] = \
self.main_loop.algorithm.batch
def test_main_loop():
old_config_profile_value = config.profile
config.profile = True
main_loop = MainLoop(
MockAlgorithm(), IterableDataset(range(10)).get_example_stream(),
extensions=[WriteBatchExtension(), FinishAfter(after_n_epochs=2)])
main_loop.run()
assert_raises(AttributeError, getattr, main_loop, 'model')
assert main_loop.log.status['iterations_done'] == 20
assert main_loop.log.status['_epoch_ends'] == [10, 20]
assert len(main_loop.log) == 20
for i in range(20):
assert main_loop.log[i + 1]['batch'] == {'data': i % 10}
config.profile = old_config_profile_value
def test_training_resumption():
def do_test(with_serialization):
data_stream = IterableDataset(range(10)).get_example_stream()
main_loop = MainLoop(
MockAlgorithm(), data_stream,
extensions=[WriteBatchExtension(),
FinishAfter(after_n_batches=14)])
main_loop.run()
assert main_loop.log.status['iterations_done'] == 14
if with_serialization:
main_loop = cPickle.loads(cPickle.dumps(main_loop))
finish_after = unpack(
[ext for ext in main_loop.extensions
if isinstance(ext, FinishAfter)], singleton=True)
finish_after.add_condition(
["after_batch"],
predicate=lambda log: log.status['iterations_done'] == 27)
main_loop.run()
assert main_loop.log.status['iterations_done'] == 27
assert main_loop.log.status['epochs_done'] == 2
for i in range(27):
assert main_loop.log[i + 1]['batch'] == {"data": i % 10}
do_test(False)
do_test(True)
def test_training_interrupt():
def process_batch(batch):
time.sleep(0.1)
algorithm = MockAlgorithm()
algorithm.process_batch = process_batch
main_loop = MockMainLoop(
algorithm=algorithm,
data_stream=IterableDataset(count()).get_example_stream(),
extensions=[Printing()]
)
p = Process(target=main_loop.run)
p.start()
time.sleep(0.1)
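# The first SIGINT asks the main loop to finish gracefully, so the process should still be alive; the second one forces it to stop.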
os.kill(p.pid, signal.SIGINT)
time.sleep(0.1)
assert p.is_alive()
os.kill(p.pid, signal.SIGINT)
time.sleep(0.2)
assert not p.is_alive()
p.join()
def test_error():
ext = TrainingExtension()
ext.after_batch = MagicMock(side_effect=KeyError)
ext.on_error = MagicMock()
main_loop = MockMainLoop(extensions=[ext, FinishAfter(after_epoch=True)])
assert_raises(KeyError, main_loop.run)
ext.on_error.assert_called_once_with(ANY)
assert 'got_exception' in main_loop.log.current_row
ext.on_error = MagicMock(side_effect=AttributeError)
main_loop = MockMainLoop(extensions=[ext, FinishAfter(after_epoch=True)])
assert_raises(KeyError, main_loop.run)
ext.on_error.assert_called_once_with(ANY)
assert 'got_exception' in main_loop.log.current_row
|
mesh_pool.py
|
import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
#print("Pool init: queue size {}, mesh edges {}, target {}".format(len(queue), mesh.edges_count, self.__out_target))
# recycle = []
# last_queue_len = len(queue)
last_count = mesh.edges_count + 1
mask = np.ones(mesh.edges_count, dtype=bool)  # np.bool was removed in newer NumPy releases
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
while mesh.edges_count > self.__out_target:
if (len(queue) == 0):
print("Out of edges for {}, current size {}".format(mesh.filename, mesh.edges_count))
break  # stop pooling instead of letting heappop() raise IndexError on an empty queue
value, edge_id = heappop(queue)
edge_id = int(edge_id)
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
if self.has_boundaries(mesh, edge_id):
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)
self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)
mesh.merge_vertices(edge_id)
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
return False
invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
mask[key_b] = False
MeshPool.__remove_group(mesh, edge_groups, key_b)
mesh.remove_edge(key_b)
mesh.edges_count -= 1
return key_a
@staticmethod
def __get_invalids(mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
if len(shared_items) == 0:
return []
else:
assert (len(shared_items) == 2)
middle_edge = other_keys_a[shared_items[0]]
update_key_a = other_keys_a[1 - shared_items[0]]
update_key_b = other_keys_b[1 - shared_items[1]]
update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]
update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]
MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b)
return [key_a, key_b, middle_edge]
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
mesh.sides[edge_a_key, side_a] = side_b
mesh.sides[edge_b_key, side_b] = side_a
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
return side + 1 - 2 * (side % 2)
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
@staticmethod
def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
vertex = set(mesh.edges[invalid_edges[0]])
for edge_key in invalid_edges:
vertex &= set(mesh.edges[edge_key])
mask[edge_key] = False
MeshPool.__remove_group(mesh, edge_groups, edge_key)
mesh.edges_count -= 3
vertex = list(vertex)
assert len(vertex) == 1, "mesh {}".format(mesh.filename)
mesh.remove_vertex(vertex[0])
def __build_queue(self, features, edges_count):
# collapse edges with the smallest squared feature norm first (see the usage sketch after this class)
squared_magnitude = torch.sum(features * features, 0)
if squared_magnitude.shape[-1] != 1:
squared_magnitude = squared_magnitude.unsqueeze(-1)
edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)
return heap
@staticmethod
def __union_groups(mesh, edge_groups, source, target):
edge_groups.union(source, target)
mesh.union_groups(source, target)
@staticmethod
def __remove_group(mesh, edge_groups, index):
edge_groups.remove_group(index)
mesh.remove_group(index)
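# --- Hedged usage sketch (not part of the original pooling code above) -------
# __build_queue turns the edge features into a min-heap of
# [squared_feature_norm, edge_id] pairs, so the pooling loop always collapses
# the edge whose feature vector currently has the smallest magnitude. The
# helper below (an illustration-only name) only demonstrates that consumption
# order on a standalone feature tensor; the real loop also re-checks masks,
# boundaries and the one-ring condition for every popped edge.
def _example_pool_order(features):
    """Return edge ids ordered by ascending squared feature norm via a heap."""
    import torch
    from heapq import heapify, heappop
    squared_magnitude = torch.sum(features * features, 0)  # one value per edge
    heap = [[float(squared_magnitude[i]), i] for i in range(squared_magnitude.shape[0])]
    heapify(heap)
    order = []
    while heap:
        _, edge_id = heappop(heap)
        order.append(edge_id)
    return order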
|
ProxyRefreshSchedule.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
-------------------------------------------------
File Name: ProxyRefreshSchedule.py
Description : scheduled proxy refresh
Author : JHao
date: 2016/12/4
-------------------------------------------------
Change Activity:
2016/12/4: scheduled proxy refresh
2017/03/06: added logging via LogHandler
2017/04/26: proxies that pass validation from raw_proxy_queue but already exist in useful_proxy_queue are no longer inserted again
-------------------------------------------------
"""
import sys
import time
import logging
from threading import Thread
from apscheduler.schedulers.background import BackgroundScheduler
sys.path.append('../')
from Util.utilFunction import validUsefulProxy
from Manager.ProxyManager import ProxyManager
from Util.LogHandler import LogHandler
__author__ = 'JHao'
logging.basicConfig()
class ProxyRefreshSchedule(ProxyManager):
"""
Scheduled proxy refresh
"""
def __init__(self):
ProxyManager.__init__(self)
self.log = LogHandler('refresh_schedule')
def validProxy(self):
"""
Validate the proxies in raw_proxy_queue and put the usable ones into useful_proxy_queue
:return:
"""
self.db.changeTable(self.raw_proxy_queue)
raw_proxy_item = self.db.pop()
#self.log.info('[ProxyRefreshSchedule]RAW proxy valid check start.time:%s' % time.ctime())
# snapshot the proxies already stored, to reduce duplicated validation work
remaining_proxies = self.getAll()
while raw_proxy_item:
raw_proxy = raw_proxy_item.get('proxy')
if isinstance(raw_proxy, bytes):
# Python 3 compatibility
raw_proxy = raw_proxy.decode('utf8')
if (raw_proxy not in remaining_proxies) and validUsefulProxy(raw_proxy):
self.db.changeTable(self.useful_proxy_queue)
self.db.put(raw_proxy)
#self.log.info('[ProxyRefreshSchedule]RAW proxy: %s validation pass' % raw_proxy)
else:
self.log.info('[ProxyRefreshSchedule]:RAW proxy: %s validation failed' % raw_proxy)
self.db.changeTable(self.raw_proxy_queue)
raw_proxy_item = self.db.pop()
remaining_proxies = self.getAll()
#self.log.info('[ProxyRefreshSchedule]RAW proxy valid check complete.time:%s' % time.ctime())
def refreshPool():
pp = ProxyRefreshSchedule()
pp.validProxy()
def batchRefresh(process_num=100):
# validate the newly fetched proxies
pl = []
for num in range(process_num):
proc = Thread(target=refreshPool, args=())
pl.append(proc)
for num in range(process_num):
pl[num].daemon = True
pl[num].start()
for num in range(process_num):
pl[num].join()
def fetchAll():
p = ProxyRefreshSchedule()
# fetch new proxies
p.refresh()
def run():
scheduler = BackgroundScheduler()
# No need to run too often: the source sites update slowly, and fetching too fast only adds validation pressure and lets raw_proxy pile up
scheduler.add_job(fetchAll, 'interval', minutes=0.2, id="fetch_proxy")
scheduler.add_job(batchRefresh, "interval", minutes=0.1)  # validate the raw proxies every few seconds
scheduler.start()
fetchAll()
while True:
time.sleep(3)
if __name__ == '__main__':
run()
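# --- Hedged usage sketch (not part of the original proxy_pool code) ----------
# run() above wires two interval jobs into one BackgroundScheduler: a slower
# fetcher that fills raw_proxy_queue and a faster validator that drains it.
# The standalone function below (an illustration-only name) reproduces just
# that scheduling pattern with dummy callables, so it can be tried without
# Redis/SSDB or the real proxy fetchers.
def _example_schedule(run_for_seconds=30):
    import time
    from apscheduler.schedulers.background import BackgroundScheduler
    scheduler = BackgroundScheduler()
    scheduler.add_job(lambda: print("fetch new proxies"), "interval", seconds=12, id="fetch_demo")
    scheduler.add_job(lambda: print("validate raw proxies"), "interval", seconds=6, id="refresh_demo")
    scheduler.start()
    try:
        time.sleep(run_for_seconds)  # let a few runs happen
    finally:
        scheduler.shutdown()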
|
test_ipc.py
|
import sys
import multiprocessing as mp
import traceback
import pickle
import numpy as np
from numba import cuda
from numba.cuda.cudadrv import drvapi, devicearray
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
import unittest
not_linux = not sys.platform.startswith('linux')
has_mp_get_context = hasattr(mp, 'get_context')
def core_ipc_handle_test(the_work, result_queue):
try:
arr = the_work()
except:
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
def base_ipc_handle_test(handle, size, result_queue):
def the_work():
dtype = np.dtype(np.intp)
with cuda.open_ipc_array(handle, shape=size // dtype.itemsize,
dtype=dtype) as darr:
# copy the data to host
return darr.copy_to_host()
core_ipc_handle_test(the_work, result_queue)
def serialize_ipc_handle_test(handle, result_queue):
def the_work():
dtype = np.dtype(np.intp)
darr = handle.open_array(cuda.current_context(),
shape=handle.size // dtype.itemsize,
dtype=dtype)
# copy the data to host
arr = darr.copy_to_host()
handle.close()
return arr
core_ipc_handle_test(the_work, result_queue)
def ipc_array_test(ipcarr, result_queue):
try:
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
except:
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@unittest.skipIf(not_linux, "IPC only supported on Linux")
@unittest.skipUnless(has_mp_get_context, "requires multiprocessing.get_context")
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcMemory(CUDATestCase):
def test_ipc_handle(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# manually prepare for serialization as bytes
handle_bytes = bytes(ipch.handle)
size = ipch.size
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (handle_bytes, size, result_queue)
proc = ctx.Process(target=base_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
proc.join(3)
def check_ipc_handle_serialization(self, index_arg=None):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
if index_arg is not None:
devarr = devarr[index_arg]
expect = devarr.copy_to_host()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=serialize_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_handle_serialization(self):
# test no slicing
self.check_ipc_handle_serialization()
# slicing tests
self.check_ipc_handle_serialization(slice(3, None))
self.check_ipc_handle_serialization(slice(3, 8))
self.check_ipc_handle_serialization(slice(None, 8))
def check_ipc_array(self, index_arg=None):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# Slice
if index_arg is not None:
devarr = devarr[index_arg]
expect = devarr.copy_to_host()
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_array(self):
# test no slicing
self.check_ipc_array()
# slicing tests
self.check_ipc_array(slice(3, None))
self.check_ipc_array(slice(3, 8))
self.check_ipc_array(slice(None, 8))
@unittest.skipUnless(not_linux, "Only on OS other than Linux")
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcNotSupported(CUDATestCase):
def test_unsupported(self):
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
with self.assertRaises(OSError) as raises:
devarr.get_ipc_handle()
errmsg = str(raises.exception)
self.assertIn('OS does not support CUDA IPC', errmsg)
def staged_ipc_handle_test(handle, device_num, result_queue):
def the_work():
with cuda.gpus[device_num]:
this_ctx = cuda.devices.get_context()
can_access = handle.can_access_peer(this_ctx)
print('can_access_peer {} {}'.format(this_ctx, can_access))
deviceptr = handle.open_staged(this_ctx)
arrsize = handle.size // np.dtype(np.intp).itemsize
hostarray = np.zeros(arrsize, dtype=np.intp)
cuda.driver.device_to_host(
hostarray, deviceptr, size=handle.size,
)
handle.close()
return hostarray
core_ipc_handle_test(the_work, result_queue)
def staged_ipc_array_test(ipcarr, device_num, result_queue):
try:
with cuda.gpus[device_num]:
this_ctx = cuda.devices.get_context()
print(this_ctx.device)
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
except:
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@unittest.skipIf(not_linux, "IPC only supported on Linux")
@unittest.skipUnless(has_mp_get_context, "requires multiprocessing.get_context")
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcStaged(CUDATestCase):
def test_staged(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# spawn new process for testing
mpctx = mp.get_context('spawn')
result_queue = mpctx.Queue()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# Test on every CUDA devices
for device_num in range(len(cuda.gpus)):
args = (ipch, device_num, result_queue)
proc = mpctx.Process(target=staged_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
def test_ipc_array(self):
for device_num in range(len(cuda.gpus)):
# prepare data for IPC
arr = np.random.random(10)
devarr = cuda.to_device(arr)
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, device_num, result_queue)
proc = ctx.Process(target=staged_ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
if __name__ == '__main__':
unittest.main()
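# --- Hedged usage sketch (not part of the numba test suite above) ------------
# The tests exercise the CUDA IPC round trip: the parent exports a device array
# as a picklable IPC handle, ships it to a spawned child process, and the child
# reopens it as a device array. A minimal version of that flow, reusing this
# module's np/mp/cuda imports and assuming a Linux host with a working CUDA
# device (the _example_* names are illustration-only):
def _example_ipc_child(ipch, result_queue):
    # child side: reopen the exported allocation and copy it back to the host
    with ipch as darr:
        result_queue.put(darr.copy_to_host())
def _example_ipc_roundtrip():
    arr = np.arange(10, dtype=np.intp)
    devarr = cuda.to_device(arr)
    ipch = devarr.get_ipc_handle()  # picklable, as checked in the tests above
    ctx = mp.get_context('spawn')
    result_queue = ctx.Queue()
    proc = ctx.Process(target=_example_ipc_child, args=(ipch, result_queue))
    proc.start()
    out = result_queue.get()
    proc.join(3)
    np.testing.assert_equal(arr, out)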
|
__init__.py
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2021, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import os
import sys
import contextlib
import time
import datetime
import json
import textwrap
import stat
import shutil
import shlex
import threading
import concurrent.futures
import subprocess
import signal
import tempfile
from functools import partial
from itertools import chain
from collections import namedtuple
from snakemake.io import _IOFile
import random
import base64
import uuid
import re
import math
from snakemake.jobs import Job
from snakemake.shell import shell
from snakemake.logging import logger
from snakemake.stats import Stats
from snakemake.utils import format, Unformattable, makedirs
from snakemake.io import get_wildcard_names, Wildcards
from snakemake.exceptions import print_exception, get_exception_origin
from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
from snakemake.exceptions import (
ProtectedOutputException,
WorkflowError,
ImproperShadowException,
SpawnedJobError,
CacheMissException,
)
from snakemake.common import Mode, __version__, get_container_image, get_uuid
# TODO move each executor into a separate submodule
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
class AbstractExecutor:
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
printthreads=True,
latency_wait=3,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.dag = dag
self.quiet = quiet
self.printreason = printreason
self.printshellcmds = printshellcmds
self.printthreads = printthreads
self.latency_wait = latency_wait
self.keepincomplete = keepincomplete
self.keepmetadata = keepmetadata
def get_default_remote_provider_args(self):
if self.workflow.default_remote_provider:
return (
" --default-remote-provider {} " "--default-remote-prefix {} "
).format(
self.workflow.default_remote_provider.__module__.split(".")[-1],
self.workflow.default_remote_prefix,
)
return ""
def _format_key_value_args(self, flag, kwargs):
if kwargs:
return " {} {} ".format(
flag,
" ".join("{}={}".format(key, value) for key, value in kwargs.items()),
)
return ""
def get_set_threads_args(self):
return self._format_key_value_args(
"--set-threads", self.workflow.overwrite_threads
)
def get_set_resources_args(self):
if self.workflow.overwrite_resources:
return " --set-resources {} ".format(
" ".join(
"{}:{}={}".format(rule, name, value)
for rule, res in self.workflow.overwrite_resources.items()
for name, value in res.items()
),
)
return ""
def get_set_scatter_args(self):
return self._format_key_value_args(
"--set-scatter", self.workflow.overwrite_scatter
)
def get_default_resources_args(self, default_resources=None):
if default_resources is None:
default_resources = self.workflow.default_resources
if default_resources:
def fmt(res):
if isinstance(res, str):
res = res.replace('"', r"\"")
return '"{}"'.format(res)
args = " --default-resources {} ".format(
" ".join(map(fmt, self.workflow.default_resources.args))
)
return args
return ""
def get_behavior_args(self):
if self.workflow.conda_not_block_search_path_envvars:
return " --conda-not-block-search-path-envvars "
return ""
def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
"""Run a list of jobs that is ready at a given point in time.
By default, this method just runs each job individually.
This method can be overwritten to submit many jobs in a more efficient way than one-by-one.
Note that in any case, for each job, the callback functions have to be called individually!
"""
for job in jobs:
self.run(
job,
callback=callback,
submit_callback=submit_callback,
error_callback=error_callback,
)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
"""Run a specific job or group job."""
self._run(job)
callback(job)
def shutdown(self):
pass
def cancel(self):
pass
def _run(self, job):
job.check_protected_output()
self.printjob(job)
def rule_prefix(self, job):
return "local " if job.is_local else ""
def printjob(self, job):
job.log_info(skip_dynamic=True)
def print_job_error(self, job, msg=None, **kwargs):
job.log_error(msg, **kwargs)
def handle_job_success(self, job):
pass
def handle_job_error(self, job):
pass
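# --- Hedged sketch (not part of snakemake) ------------------------------------
# run_jobs() above documents the extension point for batch submission: an
# executor may hand all ready jobs to its backend at once, as long as the
# per-job callbacks are still fired for every single job. A minimal subclass
# illustrating that contract (illustration-only name; it still needs the usual
# workflow/dag arguments to be instantiated):
class _ExampleBatchExecutor(AbstractExecutor):
    def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
        batch = list(jobs)  # pretend this whole batch goes out in one submission
        for job in batch:   # ...but report submission/success per job, as required
            self.run(
                job,
                callback=callback,
                submit_callback=submit_callback,
                error_callback=error_callback,
            )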
class DryrunExecutor(AbstractExecutor):
def printjob(self, job):
super().printjob(job)
if job.is_group():
for j in job.jobs:
self.printcache(j)
else:
self.printcache(job)
def printcache(self, job):
if self.workflow.is_cached_rule(job.rule):
if self.workflow.output_file_cache.exists(job):
logger.info(
"Output file {} will be obtained from global between-workflow cache.".format(
job.output[0]
)
)
else:
logger.info(
"Output file {} will be written to global between-workflow cache.".format(
job.output[0]
)
)
class RealExecutor(AbstractExecutor):
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.assume_shared_fs = assume_shared_fs
self.stats = Stats()
self.snakefile = workflow.main_snakefile
def register_job(self, job):
job.register()
def _run(self, job, callback=None, error_callback=None):
super()._run(job)
self.stats.report_job_start(job)
try:
self.register_job(job)
except IOError as e:
logger.info(
"Failed to set marker file for job started ({}). "
"Snakemake will work, but cannot ensure that output files "
"are complete in case of a kill signal or power loss. "
"Please ensure write permissions for the "
"directory {}".format(e, self.workflow.persistence.path)
)
def handle_job_success(
self,
job,
upload_remote=True,
handle_log=True,
handle_touch=True,
ignore_missing_output=False,
):
job.postprocess(
upload_remote=upload_remote,
handle_log=handle_log,
handle_touch=handle_touch,
ignore_missing_output=ignore_missing_output,
latency_wait=self.latency_wait,
assume_shared_fs=self.assume_shared_fs,
keep_metadata=self.keepmetadata,
)
self.stats.report_job_end(job)
def handle_job_error(self, job, upload_remote=True):
job.postprocess(
error=True,
assume_shared_fs=self.assume_shared_fs,
latency_wait=self.latency_wait,
)
def get_additional_args(self):
"""Return a string to add to self.exec_job that includes additional
arguments from the command line. This is currently used in the
ClusterExecutor and CPUExecutor, as both were using the same
code. Both derive from RealExecutor.
"""
additional = ""
if not self.workflow.cleanup_scripts:
additional += " --skip-script-cleanup "
if self.workflow.shadow_prefix:
additional += " --shadow-prefix {} ".format(self.workflow.shadow_prefix)
if self.workflow.use_conda:
additional += " --use-conda "
if self.workflow.conda_prefix:
additional += " --conda-prefix {} ".format(self.workflow.conda_prefix)
if self.workflow.conda_base_path and self.assume_shared_fs:
additional += " --conda-base-path {} ".format(
self.workflow.conda_base_path
)
if self.workflow.use_singularity:
additional += " --use-singularity "
if self.workflow.singularity_prefix:
additional += " --singularity-prefix {} ".format(
self.workflow.singularity_prefix
)
if self.workflow.singularity_args:
additional += ' --singularity-args "{}"'.format(
self.workflow.singularity_args
)
if not self.workflow.execute_subworkflows:
additional += " --no-subworkflows "
if self.workflow.max_threads is not None:
additional += " --max-threads {} ".format(self.workflow.max_threads)
additional += self.get_set_resources_args()
additional += self.get_set_scatter_args()
additional += self.get_set_threads_args()
additional += self.get_behavior_args()
if self.workflow.use_env_modules:
additional += " --use-envmodules "
if not self.keepmetadata:
additional += " --drop-metadata "
return additional
def format_job_pattern(self, pattern, job=None, **kwargs):
overwrite_workdir = []
if self.workflow.overwrite_workdir:
overwrite_workdir.extend(("--directory", self.workflow.overwrite_workdir))
overwrite_config = []
if self.workflow.overwrite_configfiles:
# add each of the overwriting configfiles in the original order
if self.workflow.overwrite_configfiles:
overwrite_config.append("--configfiles")
overwrite_config.extend(self.workflow.overwrite_configfiles)
if self.workflow.config_args:
overwrite_config.append("--config")
overwrite_config.extend(self.workflow.config_args)
printshellcmds = ""
if self.workflow.printshellcmds:
printshellcmds = "-p"
if not job.is_branched and not job.is_updated:
# Restrict considered rules. This does not work for updated jobs
# because they need to be updated in the spawned process as well.
rules = ["--allowed-rules"]
rules.extend(job.rules)
else:
rules = []
target = kwargs.get("target", job.get_targets())
snakefile = kwargs.get("snakefile", self.snakefile)
cores = kwargs.get("cores", self.cores)
if "target" in kwargs:
del kwargs["target"]
if "snakefile" in kwargs:
del kwargs["snakefile"]
if "cores" in kwargs:
del kwargs["cores"]
cmd = format(
pattern,
job=job,
attempt=job.attempt,
overwrite_workdir=overwrite_workdir,
overwrite_config=overwrite_config,
printshellcmds=printshellcmds,
workflow=self.workflow,
snakefile=snakefile,
cores=cores,
benchmark_repeats=job.benchmark_repeats if not job.is_group() else None,
target=target,
rules=rules,
**kwargs,
)
return cmd
class TouchExecutor(RealExecutor):
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
try:
# Touching of output files will be done by handle_job_success
time.sleep(0.1)
callback(job)
except OSError as ex:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job, ignore_missing_output=True)
_ProcessPoolExceptions = (KeyboardInterrupt,)
try:
from concurrent.futures.process import BrokenProcessPool
_ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
except ImportError:
pass
class CPUExecutor(RealExecutor):
def __init__(
self,
workflow,
dag,
workers,
printreason=False,
quiet=False,
printshellcmds=False,
use_threads=False,
latency_wait=3,
cores=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.exec_job = "\\\n".join(
(
"cd {workflow.workdir_init} && ",
"{sys.executable} -m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote ",
"--attempt {attempt} --scheduler {workflow.scheduler_type} ",
"--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ",
"--max-inventory-time 0 --ignore-incomplete ",
"--latency-wait {latency_wait} ",
self.get_default_remote_provider_args(),
self.get_default_resources_args(),
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ",
"--notemp --quiet --no-hooks --nolock --mode {} ".format(
Mode.subprocess
),
)
)
self.exec_job += self.get_additional_args()
self.use_threads = use_threads
self.cores = cores
# Zero thread jobs do not need a thread, but they occupy additional workers.
# Hence we need to reserve additional workers for them.
self.workers = workers + 5
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=self.workers)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
if job.is_group():
# if we still don't have enough workers for this group, create a new pool here
missing_workers = max(len(job) - self.workers, 0)
if missing_workers:
self.workers += missing_workers
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.workers
)
# the future waits for the entire group job
future = self.pool.submit(self.run_group_job, job)
else:
future = self.run_single_job(job)
future.add_done_callback(partial(self._callback, job, callback, error_callback))
def job_args_and_prepare(self, job):
job.prepare()
conda_env = job.conda_env_path if self.workflow.use_conda else None
container_img = (
job.container_img_path if self.workflow.use_singularity else None
)
env_modules = job.env_modules if self.workflow.use_env_modules else None
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
container_img,
self.workflow.singularity_args,
env_modules,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
self.workflow.edit_notebook,
self.workflow.conda_base_path,
job.rule.basedir,
)
def run_single_job(self, job):
if self.use_threads or (not job.is_shadow and not job.is_run):
future = self.pool.submit(
self.cached_or_run, job, run_wrapper, *self.job_args_and_prepare(job)
)
else:
# run directive jobs are spawned into subprocesses
future = self.pool.submit(self.cached_or_run, job, self.spawn_job, job)
return future
def run_group_job(self, job):
"""Run a pipe group job.
This lets all items run simultaneously."""
# we only have to consider pipe groups because in local running mode,
# these are the only groups that will occur
futures = [self.run_single_job(j) for j in job]
while True:
k = 0
for f in futures:
if f.done():
ex = f.exception()
if ex is not None:
# kill all shell commands of the other group jobs
# there can be only shell commands because the
# run directive is not allowed for pipe jobs
for j in job:
shell.kill(j.jobid)
raise ex
else:
k += 1
if k == len(futures):
return
time.sleep(1)
def spawn_job(self, job):
exec_job = self.exec_job
cmd = self.format_job_pattern(
exec_job, job=job, _quote_all=True, latency_wait=self.latency_wait
)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
raise SpawnedJobError()
def cached_or_run(self, job, run_func, *args):
"""
Either retrieve result from cache, or run job with given function.
"""
to_cache = self.workflow.is_cached_rule(job.rule)
try:
if to_cache:
self.workflow.output_file_cache.fetch(job)
return
except CacheMissException:
pass
run_func(*args)
if to_cache:
self.workflow.output_file_cache.store(job)
def shutdown(self):
self.pool.shutdown()
def cancel(self):
self.pool.shutdown()
def _callback(self, job, callback, error_callback, future):
try:
ex = future.exception()
if ex is not None:
raise ex
callback(job)
except _ProcessPoolExceptions:
self.handle_job_error(job)
# no error callback, just silently ignore the interrupt as the main scheduler is also killed
except SpawnedJobError:
# don't print error message, this is done by the spawned subprocess
error_callback(job)
except (Exception, BaseException) as ex:
self.print_job_error(job)
if not (job.is_group() or job.shellcmd) or self.workflow.verbose:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job)
def handle_job_error(self, job):
super().handle_job_error(job)
if not self.keepincomplete:
job.cleanup()
self.workflow.persistence.cleanup(job)
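# --- Hedged sketch (not part of snakemake) ------------------------------------
# CPUExecutor.run_group_job above polls the futures of a pipe group and fails
# fast as soon as any member raises, because the sibling shell commands have to
# be torn down together. The same polling pattern in isolation (illustration-only
# helper name):
def _example_wait_all_or_fail(futures, poll_interval=1):
    import time
    while True:
        finished = 0
        for future in futures:
            if future.done():
                ex = future.exception()
                if ex is not None:
                    raise ex  # fail fast on the first error
                finished += 1
        if finished == len(futures):
            return
        time.sleep(poll_interval)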
class ClusterExecutor(RealExecutor):
"""Backend for distributed execution.
The key idea is that a job is converted into a script that invokes Snakemake again, in whatever environment is targeted. The script is submitted to some job management platform (e.g. a cluster scheduler like slurm).
This class can be specialized to generate more specific backends, also for the cloud.
"""
default_jobscript = "jobscript.sh"
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
exec_job=None,
assume_shared_fs=True,
max_status_checks_per_second=1,
disable_default_remote_provider_args=False,
disable_get_default_resources_args=False,
keepincomplete=False,
keepmetadata=True,
):
from ratelimiter import RateLimiter
local_input = local_input or []
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if not self.assume_shared_fs:
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
jobscript = workflow.jobscript
if jobscript is None:
jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript)
try:
with open(jobscript) as f:
self.jobscript = f.read()
except IOError as e:
raise WorkflowError(e)
if not "jobid" in get_wildcard_names(jobname):
raise WorkflowError(
'Defined jobname ("{}") has to contain the wildcard {jobid}.'
)
if exec_job is None:
self.exec_job = "\\\n".join(
(
"{envvars} " "cd {workflow.workdir_init} && "
if assume_shared_fs
else "",
"{sys.executable} " if assume_shared_fs else "python ",
"-m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote --max-inventory-time 0 ",
"{waitfiles_parameter:u} --latency-wait {latency_wait} ",
" --attempt {attempt} {use_threads} --scheduler {workflow.scheduler_type} ",
"--wrapper-prefix {workflow.wrapper_prefix} ",
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} "
"--nocolor --notemp --no-hooks --nolock {scheduler_solver_path:u} ",
"--mode {} ".format(Mode.cluster),
)
)
else:
self.exec_job = exec_job
self.exec_job += self.get_additional_args()
if not disable_default_remote_provider_args:
self.exec_job += self.get_default_remote_provider_args()
if not disable_get_default_resources_args:
self.exec_job += self.get_default_resources_args()
self.jobname = jobname
self._tmpdir = None
self.cores = cores if cores else "all"
self.cluster_config = cluster_config if cluster_config else dict()
self.restart_times = restart_times
self.active_jobs = list()
self.lock = threading.Lock()
self.wait = True
self.wait_thread = threading.Thread(target=self._wait_thread)
self.wait_thread.daemon = True
self.wait_thread.start()
self.max_status_checks_per_second = max_status_checks_per_second
self.status_rate_limiter = RateLimiter(
max_calls=self.max_status_checks_per_second, period=1
)
def _wait_thread(self):
try:
self._wait_for_jobs()
except Exception as e:
self.workflow.scheduler.executor_error_callback(e)
def shutdown(self):
with self.lock:
self.wait = False
self.wait_thread.join()
if not self.workflow.immediate_submit:
# Only delete tmpdir (containing jobscripts) if not using
# immediate_submit. With immediate_submit, jobs can be scheduled
# after this method is completed. Hence we have to keep the
# directory.
shutil.rmtree(self.tmpdir)
def cancel(self):
self.shutdown()
def _run(self, job, callback=None, error_callback=None):
if self.assume_shared_fs:
job.remove_existing_output()
job.download_remote_input()
super()._run(job, callback=callback, error_callback=error_callback)
@property
def tmpdir(self):
if self._tmpdir is None:
self._tmpdir = tempfile.mkdtemp(dir=".snakemake", prefix="tmp.")
return os.path.abspath(self._tmpdir)
def get_jobscript(self, job):
f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job))
if os.path.sep in f:
raise WorkflowError(
"Path separator ({}) found in job name {}. "
"This is not supported.".format(os.path.sep, f)
)
return os.path.join(self.tmpdir, f)
def format_job(self, pattern, job, **kwargs):
wait_for_files = []
scheduler_solver_path = ""
if self.assume_shared_fs:
wait_for_files.append(self.tmpdir)
wait_for_files.extend(job.get_wait_for_files())
# Prepend PATH of current python executable to PATH.
# This way, we ensure that the snakemake process in the cluster node runs
# in the same environment as the current process.
# This is necessary in order to find the pulp solver backends (e.g. coincbc).
scheduler_solver_path = "--scheduler-solver-path {}".format(
os.path.dirname(sys.executable)
)
# Only create extra file if we have more than 20 input files.
# This should not require the file creation in most cases.
if len(wait_for_files) > 20:
wait_for_files_file = self.get_jobscript(job) + ".waitforfilesfile.txt"
with open(wait_for_files_file, "w") as fd:
fd.write("\n".join(wait_for_files))
waitfiles_parameter = format(
"--wait-for-files-file {wait_for_files_file}",
wait_for_files_file=wait_for_files_file,
)
else:
waitfiles_parameter = format(
"--wait-for-files {wait_for_files}", wait_for_files=wait_for_files
)
format_p = partial(
self.format_job_pattern,
job=job,
properties=job.properties(cluster=self.cluster_params(job)),
latency_wait=self.latency_wait,
waitfiles_parameter=waitfiles_parameter,
scheduler_solver_path=scheduler_solver_path,
**kwargs,
)
try:
return format_p(pattern)
except KeyError as e:
raise WorkflowError(
"Error formatting jobscript: {} not found\n"
"Make sure that your custom jobscript is up to date.".format(e)
)
def write_jobscript(self, job, jobscript, **kwargs):
# only force threads if this is not a group job
# otherwise we want proper process handling
use_threads = "--force-use-threads" if not job.is_group() else ""
envvars = " ".join(
"{}={}".format(var, os.environ[var]) for var in self.workflow.envvars
)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads=use_threads,
envvars=envvars,
**kwargs,
)
content = self.format_job(self.jobscript, job, exec_job=exec_job, **kwargs)
logger.debug("Jobscript:\n{}".format(content))
with open(jobscript, "w") as f:
print(content, file=f)
os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR | stat.S_IRUSR)
def cluster_params(self, job):
"""Return wildcards object for job from cluster_config."""
cluster = self.cluster_config.get("__default__", dict()).copy()
cluster.update(self.cluster_config.get(job.name, dict()))
# Format values with available parameters from the job.
for key, value in list(cluster.items()):
if isinstance(value, str):
try:
cluster[key] = job.format_wildcards(value)
except NameError as e:
if job.is_group():
msg = (
"Failed to format cluster config for group job. "
"You have to ensure that your default entry "
"does not contain any items that group jobs "
"cannot provide, like {rule}, {wildcards}."
)
else:
msg = (
"Failed to format cluster config "
"entry for job {}.".format(job.rule.name)
)
raise WorkflowError(msg, e)
return cluster
def cluster_wildcards(self, job):
return Wildcards(fromdict=self.cluster_params(job))
def handle_job_success(self, job):
super().handle_job_success(
job, upload_remote=False, handle_log=False, handle_touch=False
)
def handle_job_error(self, job):
# TODO what about removing empty remote dirs?? This cannot be decided
# on the cluster node.
super().handle_job_error(job, upload_remote=False)
logger.debug("Cleanup job metadata.")
# We have to remove metadata here as well.
# It will be removed by the CPUExecutor in case of a shared FS,
# but we might not see the removal due to filesystem latency.
# By removing it again, we make sure that it is gone on the host FS.
if not self.keepincomplete:
self.workflow.persistence.cleanup(job)
# Also cleanup the jobs output files, in case the remote job
# was not able to, due to e.g. timeout.
logger.debug("Cleanup failed jobs output files.")
job.cleanup()
def print_cluster_job_error(self, job_info, jobid):
job = job_info.job
kind = (
"rule {}".format(job.rule.name)
if not job.is_group()
else "group job {}".format(job.groupid)
)
logger.error(
"Error executing {} on cluster (jobid: {}, external: "
"{}, jobscript: {}). For error details see the cluster "
"log and the log files of the involved rule(s).".format(
kind, jobid, job_info.jobid, job_info.jobscript
)
)
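# --- Hedged sketch (not part of snakemake) ------------------------------------
# The ClusterExecutor docstring above describes the core trick: each job is
# written out as a jobscript that re-invokes snakemake for exactly that job,
# and the executor only hands that script to a submission command. A
# stripped-down illustration of that hand-off (GenericClusterExecutor.run below
# does the real version, including dependencies and cluster-config wildcards;
# _example_submit is an illustration-only name):
def _example_submit(jobscript, submitcmd="qsub"):
    import subprocess
    out = subprocess.check_output(
        '{submitcmd} "{jobscript}"'.format(submitcmd=submitcmd, jobscript=jobscript),
        shell=True,
    )
    # most schedulers print the external job id on stdout
    return out.decode().split("\n")[0]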
GenericClusterJob = namedtuple(
"GenericClusterJob",
"job jobid callback error_callback jobscript jobfinished jobfailed",
)
class GenericClusterExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
statuscmd=None,
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.submitcmd = submitcmd
if not assume_shared_fs and statuscmd is None:
raise WorkflowError(
"When no shared filesystem can be assumed, a "
"status command must be given."
)
self.statuscmd = statuscmd
self.external_jobid = dict()
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if statuscmd:
self.exec_job += " && exit 0 || exit 1"
elif assume_shared_fs:
# TODO wrap with watch and touch {jobrunning}
# check modification date of {jobrunning} in the wait_for_job method
self.exec_job += " && touch {jobfinished} || (touch {jobfailed}; exit 1)"
else:
raise WorkflowError(
"If no shared filesystem is used, you have to "
"specify a cluster status command."
)
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def register_job(self, job):
# Do not register job here.
# Instead do it manually once the jobid is known.
pass
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
self.write_jobscript(
job, jobscript, jobfinished=jobfinished, jobfailed=jobfailed
)
if self.statuscmd:
ext_jobid = self.dag.incomplete_external_jobid(job)
if ext_jobid:
# Job is incomplete and still running.
# We simply register it and wait for completion or failure.
logger.info(
"Resuming incomplete job {} with external jobid '{}'.".format(
jobid, ext_jobid
)
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
return
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
try:
ext_jobid = (
subprocess.check_output(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
.decode()
.split("\n")
)
except subprocess.CalledProcessError as ex:
logger.error(
"Error submitting jobscript (exit code {}):\n{}".format(
ex.returncode, ex.output.decode()
)
)
error_callback(job)
return
if ext_jobid and ext_jobid[0]:
ext_jobid = ext_jobid[0]
self.external_jobid.update((f, ext_jobid) for f in job.output)
logger.info(
"Submitted {} {} with external jobid '{}'.".format(
"group job" if job.is_group() else "job", jobid, ext_jobid
)
)
self.workflow.persistence.started(job, external_jobid=ext_jobid)
submit_callback(job)
with self.lock:
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
def _wait_for_jobs(self):
success = "success"
failed = "failed"
running = "running"
status_cmd_kills = set()
if self.statuscmd is not None:
def job_status(job, valid_returns=["running", "success", "failed"]):
try:
# this command shall return "success", "failed" or "running"
ret = subprocess.check_output(
"{statuscmd} {jobid}".format(
jobid=job.jobid, statuscmd=self.statuscmd
),
shell=True,
).decode()
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Ignore SIGINT and all other issues due to signals
# because it will be caused by hitting e.g.
# Ctrl-C on the main process or sending killall to
# snakemake.
# Snakemake will handle the signal in
# the main process.
status_cmd_kills.add(e.returncode)
if len(status_cmd_kills) > 10:
logger.info(
"Cluster status command {} was killed >10 times with signal(s) {} "
"(if this happens unexpectedly during your workflow execution, "
"have a closer look.).".format(
self.statuscmd, ",".join(status_cmd_kills)
)
)
status_cmd_kills.clear()
else:
raise WorkflowError(
"Failed to obtain job status. "
"See above for error message."
)
ret = ret.strip().split("\n")
if len(ret) != 1 or ret[0] not in valid_returns:
raise WorkflowError(
"Cluster status command {} returned {} but just a single line with one of {} is expected.".format(
self.statuscmd, "\\n".join(ret), ",".join(valid_returns)
)
)
return ret[0]
else:
def job_status(job):
if os.path.exists(active_job.jobfinished):
os.remove(active_job.jobfinished)
os.remove(active_job.jobscript)
return success
if os.path.exists(active_job.jobfailed):
os.remove(active_job.jobfailed)
os.remove(active_job.jobscript)
return failed
return running
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
# logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
for active_job in active_jobs:
with self.status_rate_limiter:
status = job_status(active_job)
if status == success:
active_job.callback(active_job.job)
elif status == failed:
self.print_job_error(
active_job.job,
cluster_jobid=active_job.jobid
if active_job.jobid
else "unknown",
)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
still_running.append(active_job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
SynchronousClusterJob = namedtuple(
"SynchronousClusterJob", "job jobid callback error_callback jobscript process"
)
class SynchronousClusterExecutor(ClusterExecutor):
"""
invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
synchronous, blocking the foreground thread and returning the
remote exit code at remote exit.
"""
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
restart_times=0,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=10,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.submitcmd = submitcmd
self.external_jobid = dict()
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
process = subprocess.Popen(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
SynchronousClusterJob(
job, process.pid, callback, error_callback, jobscript, process
)
)
def _wait_for_jobs(self):
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
exitcode = active_job.process.poll()
if exitcode is None:
# job not yet finished
still_running.append(active_job)
elif exitcode == 0:
# job finished successfully
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
else:
# job failed
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
DRMAAClusterJob = namedtuple(
"DRMAAClusterJob", "job jobid callback error_callback jobscript"
)
class DRMAAExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
drmaa_args="",
drmaa_log_dir=None,
latency_wait=3,
cluster_config=None,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
try:
import drmaa
except ImportError:
raise WorkflowError(
"Python support for DRMAA is not installed. "
"Please install it, e.g. with easy_install3 --user drmaa"
)
except RuntimeError as e:
raise WorkflowError("Error loading drmaa support:\n{}".format(e))
self.session = drmaa.Session()
self.drmaa_args = drmaa_args
self.drmaa_log_dir = drmaa_log_dir
self.session.initialize()
self.submitted = list()
def cancel(self):
from drmaa.const import JobControlAction
from drmaa.errors import InvalidJobException, InternalException
for jobid in self.submitted:
try:
self.session.control(jobid, JobControlAction.TERMINATE)
except (InvalidJobException, InternalException):
# This is common - logging a warning would probably confuse the user.
pass
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
try:
drmaa_args = job.format_wildcards(
self.drmaa_args, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule)
import drmaa
if self.drmaa_log_dir:
makedirs(self.drmaa_log_dir)
try:
jt = self.session.createJobTemplate()
jt.remoteCommand = jobscript
jt.nativeSpecification = drmaa_args
if self.drmaa_log_dir:
jt.outputPath = ":" + self.drmaa_log_dir
jt.errorPath = ":" + self.drmaa_log_dir
jt.jobName = os.path.basename(jobscript)
jobid = self.session.runJob(jt)
except (
drmaa.DeniedByDrmException,
drmaa.InternalException,
drmaa.InvalidAttributeValueException,
) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps
)
error_callback(job)
return
logger.info(
"Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)
)
self.submitted.append(jobid)
self.session.deleteJobTemplate(jt)
submit_callback(job)
with self.lock:
self.active_jobs.append(
DRMAAClusterJob(job, jobid, callback, error_callback, jobscript)
)
def shutdown(self):
super().shutdown()
self.session.exit()
def _wait_for_jobs(self):
import drmaa
suspended_msg = set()
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
try:
retval = self.session.jobStatus(active_job.jobid)
except drmaa.ExitTimeoutException as e:
# job still active
still_running.append(active_job)
continue
except (drmaa.InternalException, Exception) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)),
self.workflow.linemaps,
)
os.remove(active_job.jobscript)
active_job.error_callback(active_job.job)
continue
if retval == drmaa.JobState.DONE:
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
elif retval == drmaa.JobState.FAILED:
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
# still running
still_running.append(active_job)
def handle_suspended(by):
if active_job.job.jobid not in suspended_msg:
logger.warning(
"Job {} (DRMAA id: {}) was suspended by {}.".format(
active_job.job.jobid, active_job.jobid, by
)
)
suspended_msg.add(active_job.job.jobid)
if retval == drmaa.JobState.USER_SUSPENDED:
handle_suspended("user")
elif retval == drmaa.JobState.SYSTEM_SUSPENDED:
handle_suspended("system")
else:
try:
suspended_msg.remove(active_job.job.jobid)
except KeyError:
# there was nothing to remove
pass
with self.lock:
self.active_jobs.extend(still_running)
sleep()
@contextlib.contextmanager
def change_working_directory(directory=None):
"""Change working directory in execution context if provided."""
if directory:
try:
saved_directory = os.getcwd()
logger.info("Changing to shadow directory: {}".format(directory))
os.chdir(directory)
yield
finally:
os.chdir(saved_directory)
else:
yield
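# --- Hedged usage sketch (not part of snakemake) -------------------------------
# change_working_directory() is used to run shadow jobs inside their shadow
# directory and to restore the previous cwd afterwards; passing None makes it a
# no-op so callers never have to branch. A tiny demonstration (illustration-only
# helper name):
def _example_change_cwd(directory=None):
    import os
    before = os.getcwd()
    with change_working_directory(directory):
        inside = os.getcwd()  # the shadow directory, or unchanged if None
    assert os.getcwd() == before
    return before, inside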
KubernetesJob = namedtuple(
"KubernetesJob", "job jobid callback error_callback kubejob jobscript"
)
class KubernetesExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
namespace,
container_image=None,
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
exec_job = (
"cp -rf /source/. . && "
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait {latency_wait} --scheduler {workflow.scheduler_type} "
" --attempt {attempt} {use_threads} --max-inventory-time 0 "
"--wrapper-prefix {workflow.wrapper_prefix} "
"{overwrite_config} {printshellcmds} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=10,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
def register_secret(self):
import kubernetes.client
secret = kubernetes.client.V1Secret()
secret.metadata = kubernetes.client.V1ObjectMeta()
# create a random uuid
secret.metadata.name = self.run_namespace
secret.type = "Opaque"
secret.data = {}
for i, f in enumerate(self.workflow.get_sources()):
if f.startswith(".."):
logger.warning(
"Ignoring source file {}. Only files relative "
"to the working directory are allowed.".format(f)
)
continue
# The kubernetes API can't create secret files larger than 1MB.
source_file_size = os.path.getsize(f)
max_file_size = 1048576
if source_file_size > max_file_size:
logger.warning(
"Skipping the source file {f}. Its size {source_file_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f, source_file_size=source_file_size
)
)
continue
with open(f, "br") as content:
key = "f{}".format(i)
# Some files are smaller than 1MB, but grow larger after being base64 encoded
# We should exclude them as well, otherwise Kubernetes APIs will complain
encoded_contents = base64.b64encode(content.read()).decode()
encoded_size = len(encoded_contents)
if encoded_size > 1048576:
logger.warning(
"Skipping the source file {f} for secret key {key}. "
"Its base64 encoded size {encoded_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f,
source_file_size=source_file_size,
key=key,
encoded_size=encoded_size,
)
)
continue
self.secret_files[key] = f
secret.data[key] = encoded_contents
for e in self.envvars:
try:
key = e.lower()
secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
self.secret_envvars[key] = e
except KeyError:
continue
# Test if the total size of the configMap exceeds 1MB
config_map_size = sum(
[len(base64.b64decode(v)) for k, v in secret.data.items()]
)
if config_map_size > 1048576:
logger.warning(
"The total size of the included files and other Kubernetes secrets "
"is {}, exceeding the 1MB limit.\n".format(config_map_size)
)
logger.warning(
"The following are the largest files. Consider removing some of them "
"(you need remove at least {} bytes):".format(config_map_size - 1048576)
)
entry_sizes = {
self.secret_files[k]: len(base64.b64decode(v))
for k, v in secret.data.items()
if k in self.secret_files
}
for k, v in sorted(entry_sizes.items(), key=lambda item: item[1])[:-6:-1]:
logger.warning(" * File: {k}, original size: {v}".format(k=k, v=v))
raise WorkflowError("ConfigMap too large")
self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
safe_delete_secret = lambda: self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
self._kubernetes_retry(safe_delete_secret)
# In rare cases, deleting a pod may raise a 404 NotFound error.
def safe_delete_pod(self, jobid, ignore_not_found=True):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
try:
self.kubeapi.delete_namespaced_pod(jobid, self.namespace, body=body)
except kubernetes.client.rest.ApiException as e:
if e.status == 404 and ignore_not_found:
# Can't find the pod. Maybe it's already been
# destroyed. Proceed with a warning message.
logger.warning(
"[WARNING] 404 not found when trying to delete the pod: {jobid}\n"
"[WARNING] Ignore this error\n".format(jobid=jobid)
)
else:
raise e
def shutdown(self):
self.unregister_secret()
super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
func = lambda: self.safe_delete_pod(j.jobid, ignore_not_found=True)
self._kubernetes_retry(func)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads="--force-use-threads" if not job.is_group() else "",
)
# Kubernetes silently does not submit a job if the name is too long
# therefore, we ensure that it is not longer than snakejob+uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
container.volume_mounts = [
kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir"),
kubernetes.client.V1VolumeMount(name="source", mount_path="/source"),
]
body.spec = kubernetes.client.V1PodSpec(containers=[container])
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
# Running in privileged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
# Sometimes, certain k8s requests throw kubernetes.client.rest.ApiException
# Solving this issue requires reauthentication, as _kubernetes_retry shows
# However, reauthentication itself, under rare conditions, may also throw
# errors such as:
# kubernetes.client.exceptions.ApiException: (409), Reason: Conflict
#
# This error doesn't mean anything wrong with the k8s cluster, and users can safely
# ignore it.
def _reauthenticate_and_retry(self, func=None):
import kubernetes
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
logger.info("Trying to reauthenticate")
kubernetes.config.load_kube_config()
subprocess.run(["kubectl", "get", "nodes"])
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
try:
self.register_secret()
except kubernetes.client.rest.ApiException as e:
if e.status == 409 and e.reason == "Conflict":
logger.warning("409 conflict ApiException when registering secrets")
logger.warning(e)
else:
raise WorkflowError(
e,
"This is likely a bug in "
"https://github.com/kubernetes-client/python.",
)
if func:
return func()
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
return self._reauthenticate_and_retry(func)
# Handling timeout that may occur in case of GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
logger.info(
"Request time out! "
"check your connection to Kubernetes master"
"Workflow will pause for 5 minutes to allow any update operations to complete"
)
time.sleep(300)
try:
return func()
except Exception:
# Still can't reach the server after 5 minutes
raise WorkflowError(
e,
"Error 111 connection timeout, please check"
" that the k8s cluster master is reachable!",
)
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
func = lambda: self.safe_delete_pod(
j.jobid, ignore_not_found=True
)
self._kubernetes_retry(func)
else:
# still active
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
TibannaJob = namedtuple(
"TibannaJob", "job jobname jobid exec_arn callback error_callback"
)
class TibannaExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
tibanna_sfn,
precommand="",
tibanna_config=False,
container_image=None,
printreason=False,
quiet=False,
printshellcmds=False,
latency_wait=3,
local_input=None,
restart_times=None,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.workflow_sources = []
for wfs in workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[os.path.join(dirpath, f) for f in filenames]
)
else:
self.workflow_sources.append(os.path.abspath(wfs))
log = "sources="
for f in self.workflow_sources:
log += f
logger.debug(log)
self.snakefile = workflow.main_snakefile
self.envvars = {e: os.environ[e] for e in workflow.envvars}
if self.envvars:
logger.debug("envvars = %s" % str(self.envvars))
self.tibanna_sfn = tibanna_sfn
if precommand:
self.precommand = precommand
else:
self.precommand = ""
self.s3_bucket = workflow.default_remote_prefix.split("/")[0]
self.s3_subdir = re.sub(
"^{}/".format(self.s3_bucket), "", workflow.default_remote_prefix
)
logger.debug("precommand= " + self.precommand)
logger.debug("bucket=" + self.s3_bucket)
logger.debug("subdir=" + self.s3_subdir)
self.quiet = quiet
exec_job = (
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait 0 --scheduler {workflow.scheduler_type} "
"--attempt 1 {use_threads} --max-inventory-time 0 "
"{overwrite_config} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
cores,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=max_status_checks_per_second,
disable_default_remote_provider_args=True,
disable_get_default_resources_args=True,
)
self.container_image = container_image or get_container_image()
self.tibanna_config = tibanna_config
def shutdown(self):
# perform additional steps on shutdown if necessary
logger.debug("shutting down Tibanna executor")
super().shutdown()
def cancel(self):
from tibanna.core import API
for j in self.active_jobs:
logger.info("killing job {}".format(j.jobname))
while True:
try:
res = API().kill(j.exec_arn)
if not self.quiet:
print(res)
break
except KeyboardInterrupt:
pass
self.shutdown()
def split_filename(self, filename, checkdir=None):
f = os.path.abspath(filename)
if checkdir:
checkdir = checkdir.rstrip("/")
if f.startswith(checkdir):
fname = re.sub("^{}/".format(checkdir), "", f)
fdir = checkdir
else:
direrrmsg = (
"All source files including Snakefile, "
+ "conda env files, and rule script files "
+ "must be in the same working directory: {} vs {}"
)
raise WorkflowError(direrrmsg.format(checkdir, f))
else:
fdir, fname = os.path.split(f)
return fname, fdir
def remove_prefix(self, s):
return re.sub("^{}/{}/".format(self.s3_bucket, self.s3_subdir), "", s)
def handle_remote(self, target):
if isinstance(target, _IOFile) and target.remote_object.provider.is_default:
return self.remove_prefix(target)
else:
return target
def add_command(self, job, tibanna_args, tibanna_config):
# snakefile, with file name remapped
snakefile_fname = tibanna_args.snakemake_main_filename
# targets, with file name remapped
targets = job.get_targets()
if not isinstance(targets, list):
targets = [targets]
targets_default = " ".join([self.handle_remote(t) for t in targets])
# use_threads
use_threads = "--force-use-threads" if not job.is_group() else ""
# format command
command = self.format_job_pattern(
self.exec_job,
job,
target=targets_default,
snakefile=snakefile_fname,
use_threads=use_threads,
cores=tibanna_config["cpu"],
)
if self.precommand:
command = self.precommand + "; " + command
logger.debug("command = " + str(command))
tibanna_args.command = command
def add_workflow_files(self, job, tibanna_args):
snakefile_fname, snakemake_dir = self.split_filename(self.snakefile)
snakemake_child_fnames = []
for src in self.workflow_sources:
src_fname, _ = self.split_filename(src, snakemake_dir)
if src_fname != snakefile_fname: # redundant
snakemake_child_fnames.append(src_fname)
# change path for config files
self.workflow.overwrite_configfiles = [
self.split_filename(cf, snakemake_dir)[0]
for cf in self.workflow.overwrite_configfiles
]
tibanna_args.snakemake_directory_local = snakemake_dir
tibanna_args.snakemake_main_filename = snakefile_fname
tibanna_args.snakemake_child_filenames = list(set(snakemake_child_fnames))
def adjust_filepath(self, f):
if not hasattr(f, "remote_object"):
rel = self.remove_prefix(f) # log/benchmark
elif (
hasattr(f.remote_object, "provider") and f.remote_object.provider.is_default
):
rel = self.remove_prefix(f)
else:
rel = f
return rel
def make_tibanna_input(self, job):
from tibanna import ec2_utils, core as tibanna_core
# input & output
# The local snakemake command here must be run with --default-remote-prefix
# and --default-remote-provider (forced), but on the VM these options will be removed.
# The snakemake instance on the VM will consider these inputs and outputs as not remote.
# The files are transferred to the container by Tibanna before running snakemake.
# In short, the paths on the VM must be consistent with what's in the Snakefile,
# but the actual location of the files is the S3 bucket/prefix.
# This mapping info must be passed to Tibanna.
for i in job.input:
logger.debug("job input " + str(i))
logger.debug("job input is remote= " + ("true" if i.is_remote else "false"))
if hasattr(i.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if i.remote_object.provider.is_default else "false")
)
for o in job.expanded_output:
logger.debug("job output " + str(o))
logger.debug(
"job output is remote= " + ("true" if o.is_remote else "false")
)
if hasattr(o.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if o.remote_object.provider.is_default else "false")
)
file_prefix = (
"file:///data1/snakemake" # working dir inside snakemake container on VM
)
input_source = dict()
for ip in job.input:
ip_rel = self.adjust_filepath(ip)
input_source[os.path.join(file_prefix, ip_rel)] = "s3://" + ip
output_target = dict()
output_all = [eo for eo in job.expanded_output]
if job.log:
if isinstance(job.log, list):
output_all.extend([str(_) for _ in job.log])
else:
output_all.append(str(job.log))
if hasattr(job, "benchmark") and job.benchmark:
if isinstance(job.benchmark, list):
output_all.extend([str(_) for _ in job.benchmark])
else:
output_all.append(str(job.benchmark))
for op in output_all:
op_rel = self.adjust_filepath(op)
output_target[os.path.join(file_prefix, op_rel)] = "s3://" + op
# mem & cpu
mem = job.resources["mem_mb"] / 1024 if "mem_mb" in job.resources.keys() else 1
cpu = job.threads
# jobid, grouping, run_name
jobid = tibanna_core.create_jobid()
if job.is_group():
run_name = "snakemake-job-%s-group-%s" % (str(jobid), str(job.groupid))
else:
run_name = "snakemake-job-%s-rule-%s" % (str(jobid), str(job.rule))
# tibanna input
tibanna_config = {
"run_name": run_name,
"mem": mem,
"cpu": cpu,
"ebs_size": math.ceil(job.resources["disk_mb"] / 1024),
"log_bucket": self.s3_bucket,
}
logger.debug("additional tibanna config: " + str(self.tibanna_config))
if self.tibanna_config:
tibanna_config.update(self.tibanna_config)
tibanna_args = ec2_utils.Args(
output_S3_bucket=self.s3_bucket,
language="snakemake",
container_image=self.container_image,
input_files=input_source,
output_target=output_target,
input_env=self.envvars,
)
self.add_workflow_files(job, tibanna_args)
self.add_command(job, tibanna_args, tibanna_config)
tibanna_input = {
"jobid": jobid,
"config": tibanna_config,
"args": tibanna_args.as_dict(),
}
logger.debug(json.dumps(tibanna_input, indent=4))
return tibanna_input
def run(self, job, callback=None, submit_callback=None, error_callback=None):
logger.info("running job using Tibanna...")
from tibanna.core import API
super()._run(job)
# submit job here, and obtain job ids from the backend
tibanna_input = self.make_tibanna_input(job)
jobid = tibanna_input["jobid"]
exec_info = API().run_workflow(
tibanna_input,
sfn=self.tibanna_sfn,
verbose=not self.quiet,
jobid=jobid,
open_browser=False,
sleep=0,
)
exec_arn = exec_info.get("_tibanna", {}).get("exec_arn", "")
jobname = tibanna_input["config"]["run_name"]
jobid = tibanna_input["jobid"]
# register job as active, using your own namedtuple.
# The namedtuple must at least contain the attributes
# job, jobid, callback, error_callback.
self.active_jobs.append(
TibannaJob(job, jobname, jobid, exec_arn, callback, error_callback)
)
def _wait_for_jobs(self):
# busy wait on job completion
# This is only needed if your backend does not allow using callbacks
# for obtaining job status.
from tibanna.core import API
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
if j.exec_arn:
status = API().check_status(j.exec_arn)
else:
status = "FAILED_AT_SUBMISSION"
if not self.quiet or status != "RUNNING":
logger.debug("job %s: %s" % (j.jobname, status))
if status == "RUNNING":
still_running.append(j)
elif status == "SUCCEEDED":
j.callback(j.job)
else:
j.error_callback(j.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
def run_wrapper(
job_rule,
input,
output,
params,
wildcards,
threads,
resources,
log,
benchmark,
benchmark_repeats,
conda_env,
container_img,
singularity_args,
env_modules,
use_singularity,
linemaps,
debug,
cleanup_scripts,
shadow_dir,
jobid,
edit_notebook,
conda_base_path,
basedir,
):
"""
Wrapper around the run method that handles exceptions and benchmarking.
Arguments
job_rule -- the ``job.rule`` member
input -- list of input files
output -- list of output files
wildcards -- so far processed wildcards
threads -- usable threads
log -- list of log files
shadow_dir -- optional shadow directory root
"""
# get shortcuts to job_rule members
run = job_rule.run_func
version = job_rule.version
rule = job_rule.name
is_shell = job_rule.shellcmd is not None
if os.name == "posix" and debug:
sys.stdin = open("/dev/stdin")
if benchmark is not None:
from snakemake.benchmark import (
BenchmarkRecord,
benchmarked,
write_benchmark_records,
)
# Change workdir if shadow defined and not using singularity.
# Otherwise, we do the change from inside the container.
passed_shadow_dir = None
if use_singularity and container_img:
passed_shadow_dir = shadow_dir
shadow_dir = None
try:
with change_working_directory(shadow_dir):
if benchmark:
bench_records = []
for bench_iteration in range(benchmark_repeats):
# Determine whether to benchmark in this process or not.
# We benchmark in this process unless the execution is done
# through the ``shell:``, ``script:``, ``wrapper:`` or ``cwl:``
# stanza (in that case the child process is benchmarked instead).
is_sub = (
job_rule.shellcmd
or job_rule.script
or job_rule.wrapper
or job_rule.cwl
)
if is_sub:
# The benchmarking through ``benchmarked()`` is started
# in the execution of the shell fragment, script, wrapper
# etc, as the child PID is available there.
bench_record = BenchmarkRecord()
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
)
else:
# The benchmarking is started here as we have a run section
# and the generated Python function is executed in this
# process' thread.
with benchmarked() as bench_record:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
)
# Store benchmark record for this iteration
bench_records.append(bench_record)
else:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
None,
jobid,
is_shell,
None,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
)
except (KeyboardInterrupt, SystemExit) as e:
# Re-raise the keyboard interrupt in order to record an error in the
# scheduler but ignore it
raise e
except (Exception, BaseException) as ex:
log_verbose_traceback(ex)
# this ensures that the exception can be re-raised in the parent thread
lineno, file = get_exception_origin(ex, linemaps)
raise RuleException(
format_error(
ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True
)
)
if benchmark is not None:
try:
write_benchmark_records(bench_records, benchmark)
except (Exception, BaseException) as ex:
raise WorkflowError(ex)
|
mailing.py
|
from telegram import Update, Message as TelegramMessage, TelegramError, Bot, PhotoSize, Animation, Video, ParseMode
from telegram.ext import CallbackContext, ConversationHandler
from threading import Thread
import logging
import time
from ..constants import Message, Keyboard, States
from ..models import User
def __send_photo(bot: Bot, message: TelegramMessage, user_id: int):
photo = PhotoSize(
file_id=message.photo[0].file_id,
file_unique_id=message.photo[0].file_unique_id,
width=message.photo[0].width,
height=message.photo[0].height,
file_size=message.photo[0].file_size
)
bot.send_photo(
chat_id=user_id,
photo=photo,
caption=message.caption_html,
parse_mode=ParseMode.HTML,
reply_markup=message.reply_markup
)
def __send_animation(bot: Bot, message: TelegramMessage, user_id: int):
animation = Animation(
file_id=message.animation.file_id,
file_unique_id=message.animation.file_unique_id,
width=message.animation.width,
height=message.animation.height,
duration=message.animation.duration
)
bot.send_animation(
chat_id=user_id,
animation=animation,
caption=message.caption_html,
parse_mode=ParseMode.HTML,
reply_markup=message.reply_markup
)
def __send_video(bot: Bot, message: TelegramMessage, user_id: int):
video = Video(
file_id=message.video.file_id,
file_unique_id=message.video.file_unique_id,
width=message.video.width,
height=message.video.height,
duration=message.video.duration
)
bot.send_video(
chat_id=user_id,
video=video,
caption=message.caption_html,
parse_mode=ParseMode.HTML,
reply_markup=message.reply_markup
)
def __send_message(bot: Bot, message: TelegramMessage, user_id: int):
if message.photo:
__send_photo(bot, message, user_id)
elif message.animation:
__send_animation(bot, message, user_id)
elif message.video:
__send_video(bot, message, user_id)
elif message.text:
bot.send_message(
chat_id=user_id,
text=message.text_html,
parse_mode=ParseMode.HTML,
reply_markup=message.reply_markup
)
def __send_mailing(context: CallbackContext):
message = context.user_data['mailing_message']
users = User.select().where(User.active == True)
sent_count = 0
for user in users:
try:
__send_message(context.bot, message, user.user_id)
sent_count += 1
time.sleep(1 / 20)  # throttle to roughly 20 messages per second to stay within Telegram's rate limits
logging.info(f'Mailing message sent to user {user.user_id} (total: {sent_count})')
except TelegramError as ex:
if ex.message == 'Forbidden: bot was blocked by the user':
user.active = False
user.save()
logging.info(f'User {user.user_id} became inactive')
else:
logging.error(ex)
except Exception as ex:
logging.error(ex)
return sent_count
def mailing_message_callback(update: Update, context: CallbackContext):
context.user_data['mailing_message'] = update.message
context.bot.send_message(
chat_id=update.effective_chat.id,
text=Message.received_mailing,
reply_markup=Keyboard.mailing
)
return States.received_mailing
def preview_mailing_callback(update: Update, context: CallbackContext):
message = context.user_data['mailing_message']
__send_message(context.bot, message, update.effective_user.id)
return States.received_mailing
def cancel_mailing_callback(update: Update, context: CallbackContext):
del context.user_data['mailing_message']
context.bot.send_message(
chat_id=update.effective_chat.id,
text=Message.mailing_canceled,
reply_markup=Keyboard.main
)
return ConversationHandler.END
def __send_mailing_callback(update: Update, context: CallbackContext):
context.bot.send_message(
chat_id=update.effective_chat.id,
text=Message.mailing_started,
reply_markup=Keyboard.main
)
logging.info('Mailing has started')
sent_count = __send_mailing(context)
del context.user_data['mailing_message']
context.bot.send_message(
chat_id=update.effective_chat.id,
text=Message.mailing_finished.format(sent_count=sent_count)
)
logging.info('Mailing has finished')
def send_mailing_callback(update: Update, context: CallbackContext):
mailing_thread = Thread(target=__send_mailing_callback, args=(update, context))
mailing_thread.start()
return ConversationHandler.END
|
downloader.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import contextlib
import http.server
import os
import socket
import tempfile
import threading
import time
import jni_test
from framework.coap_file_server import CoapFileServerThread
from framework.lwm2m_test import *
DUMMY_PAYLOAD = os.urandom(16 * 1024)
class CoapDownload:
class Test(jni_test.LocalSingleServerTest):
def setUp(self, coap_server: coap.Server = None):
super().setUp()
self.file_server_thread = CoapFileServerThread(
coap_server or coap.Server())
self.file_server_thread.start()
self.tempfile = tempfile.NamedTemporaryFile()
@property
def file_server(self):
return self.file_server_thread.file_server
def register_resource(self, path, *args, **kwargs):
with self.file_server as file_server:
file_server.set_resource(path, *args, **kwargs)
return file_server.get_resource_uri(path)
def tearDown(self):
try:
super().tearDown(timeout_s=5)
finally:
self.tempfile.close()
self.file_server_thread.join()
def read(self, path: Lwm2mPath):
req = Lwm2mRead(path)
self.serv.send(req)
res = self.serv.recv()
self.assertMsgEqual(Lwm2mContent.matching(req)(), res)
return res.content
def wait_until_downloads_finished(self):
while self.get_socket_count() > len(self.servers):
time.sleep(0.1)
class ReconnectTest(jni_test.LocalSingleServerTest):
def setUp(self, coap_server: coap.Server = None):
super().setUp()
self.communicate('remove-server')
self.assertDemoDeregisters()
self.file_server = (coap_server or coap.Server())
self.file_server.set_timeout(5)
self.tempfile = tempfile.NamedTemporaryFile()
def tearDown(self):
self.file_server.close()
self.tempfile.close()
super().tearDown(auto_deregister=False)
@contextlib.contextmanager
def downloadContext(self, path, psk_identity='',
psk_key='', payload=DUMMY_PAYLOAD):
# "download" command blocks until a connection is made, so we need to listen() in a separate thread
with concurrent.futures.ThreadPoolExecutor() as executor:
listen_future = executor.submit(self.file_server.listen)
self.communicate(
'download %s://127.0.0.1:%s%s %s %s %s' % (
'coaps' if isinstance(
self.file_server, coap.DtlsServer) else 'coap',
self.file_server.get_listen_port(), path, self.tempfile.name, psk_identity, psk_key))
listen_future.result()
self.wait_until_socket_count(1, timeout_s=1)
test_case = self
class DownloadContext:
def __init__(self):
self.seq_num = 0
self.block_size = 1024
@property
def read_offset(self):
return self.seq_num * self.block_size
def transferData(self, bytes_limit=len(payload)):
while self.read_offset < bytes_limit:
req = test_case.file_server.recv()
if self.read_offset != 0:
test_case.assertMsgEqual(CoapGet(path, options=[
coap.Option.BLOCK2(seq_num=self.seq_num, block_size=self.block_size, has_more=0)]), req)
else:
# NOTE: demo does not force any BLOCK2() option in
# first request at offset 0
test_case.assertMsgEqual(CoapGet(path), req)
block2opt = coap.Option.BLOCK2(
seq_num=self.seq_num,
has_more=(
len(DUMMY_PAYLOAD) > self.read_offset +
self.block_size),
block_size=self.block_size)
test_case.file_server.send(Lwm2mContent.matching(req)(
content=DUMMY_PAYLOAD[self.read_offset:
self.read_offset + self.block_size],
options=[block2opt]))
self.seq_num += 1
yield DownloadContext()
self.wait_until_socket_count(0, timeout_s=10)
with open(self.tempfile.name, 'rb') as f:
self.assertEqual(f.read(), DUMMY_PAYLOAD)
class CoapDownloadSockets(CoapDownload.Test):
def runTest(self):
self.communicate(
'download %s %s' %
(self.register_resource(
'/',
DUMMY_PAYLOAD),
self.tempfile.name))
self.wait_until_socket_count(2, timeout_s=2)
self.assertEqual(1, self.get_non_lwm2m_socket_count())
self.assertEqual('UDP', self.get_transport(socket_index=-1))
# make sure the download is actually done
self.wait_until_downloads_finished()
with open(self.tempfile.name, 'rb') as f:
self.assertEqual(f.read(), DUMMY_PAYLOAD)
class CoapDownloadDoesNotBlockLwm2mTraffic(CoapDownload.Test):
def runTest(self):
self.communicate(
'download %s %s' %
(self.register_resource(
'/',
DUMMY_PAYLOAD),
self.tempfile.name))
for _ in range(10):
self.read(ResPath.Server[0].Lifetime)
# make sure the download is actually done
self.wait_until_downloads_finished()
with open(self.tempfile.name, 'rb') as f:
self.assertEqual(f.read(), DUMMY_PAYLOAD)
class CoapDownloadIgnoresUnrelatedRequests(CoapDownload.Test):
def runTest(self):
self.communicate(
'download %s %s' %
(self.register_resource(
'/',
DUMMY_PAYLOAD),
self.tempfile.name))
self.wait_until_socket_count(2, timeout_s=1)
# wait for first request
while True:
with self.file_server as file_server:
if len(file_server.requests) != 0:
break
time.sleep(0.001)
for _ in range(10):
with self.file_server as file_server:
file_server._server.send(
Lwm2mRead(
ResPath.Device.SerialNumber).fill_placeholders())
# make sure the download is actually done
self.wait_until_downloads_finished()
with open(self.tempfile.name, 'rb') as f:
self.assertEqual(f.read(), DUMMY_PAYLOAD)
class CoapDownloadOverDtls(CoapDownload.Test):
PSK_IDENTITY = b'Help'
PSK_KEY = b'ImTrappedInAUniverseFactory'
def setUp(self):
super().setUp(
coap_server=coap.DtlsServer(
psk_key=self.PSK_KEY,
psk_identity=self.PSK_IDENTITY))
def runTest(self):
self.communicate('download %s %s %s %s' % (self.register_resource('/', DUMMY_PAYLOAD),
self.tempfile.name,
self.PSK_IDENTITY.decode(
'ascii'),
self.PSK_KEY.decode('ascii')))
# make sure the download is actually done
self.wait_until_downloads_finished()
with open(self.tempfile.name, 'rb') as f:
self.assertEqual(f.read(), DUMMY_PAYLOAD)
class CoapDownloadRetransmissions(CoapDownload.Test):
def runTest(self):
def should_ignore_request(req):
try:
seq_num = req.get_options(coap.Option.BLOCK2)[0].seq_num()
required_retransmissions = seq_num % 4
with self.file_server as file_server:
num_retransmissions = len(
[x for x in file_server.requests if x == req])
return num_retransmissions < required_retransmissions
except BaseException:
return False
with self.file_server as file_server:
file_server.set_resource('/', DUMMY_PAYLOAD)
file_server.should_ignore_request = should_ignore_request
uri = file_server.get_resource_uri('/')
self.communicate('download %s %s' % (uri, self.tempfile.name))
# make sure download succeeded
self.wait_until_downloads_finished()
with open(self.tempfile.name, 'rb') as f:
self.assertEqual(f.read(), DUMMY_PAYLOAD)
class HttpDownload:
class Test(jni_test.LocalSingleServerTest):
def make_request_handler(self):
raise NotImplementedError
def _create_server(self):
return http.server.HTTPServer(('', 0), self.make_request_handler())
def cv_wait(self):
with self._response_cv:
self._response_cv.wait()
def cv_notify_all(self):
with self._response_cv:
self._response_cv.notify_all()
def setUp(self, *args, **kwargs):
self.http_server = self._create_server()
self._response_cv = threading.Condition()
super().setUp(*args, **kwargs)
self.server_thread = threading.Thread(
target=lambda: self.http_server.serve_forever())
self.server_thread.start()
def tearDown(self, *args, **kwargs):
try:
super().tearDown(timeout_s=5, *args, **kwargs)
finally:
self.cv_notify_all()
self.http_server.shutdown()
self.server_thread.join()
class HttpSinglePacketDownloadDoesNotHangIfRemoteServerDoesntCloseConnection(
HttpDownload.Test):
CONTENT = b'foo'
def make_request_handler(self):
test_case = self
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(http.HTTPStatus.OK)
self.send_header('Content-type', 'text/plain')
self.send_header('Content-length', str(len(test_case.CONTENT)))
self.end_headers()
self.wfile.write(test_case.CONTENT)
self.wfile.flush()
# Returning from this method makes the server close a TCP
# connection. Prevent this to check if the client behaves
# correctly even if server does not do this.
test_case.cv_wait()
def log_request(self, code='-', size='-'):
# don't display logs on successful request
pass
return RequestHandler
def runTest(self):
with tempfile.NamedTemporaryFile() as temp_file:
self.communicate('download http://127.0.0.1:%s %s' % (
self.http_server.server_address[1], temp_file.name))
LIMIT_S = 10
for _ in range(LIMIT_S * 10):
# when download finishes, its socket gets closed, leaving only
# LwM2M one
if self.get_socket_count() <= 1:
break
time.sleep(0.1)
else:
self.fail('download not completed on time')
class CoapDownloadReconnect(CoapDownload.ReconnectTest):
def runTest(self):
with self.downloadContext('/test') as ctx:
ctx.transferData(len(DUMMY_PAYLOAD) // 2)
req = self.file_server.recv()
self.assertMsgEqual(CoapGet('/test', options=[
coap.Option.BLOCK2(seq_num=ctx.seq_num, block_size=ctx.block_size, has_more=0)]), req)
previous_port = self.file_server.get_remote_addr()[1]
self.file_server.reset()
self.communicate('reconnect udp')
self.file_server.listen()
self.assertNotEqual(
self.file_server.get_remote_addr()[1],
previous_port)
ctx.transferData()
class CoapDownloadOffline(CoapDownload.ReconnectTest):
def runTest(self):
with self.downloadContext('/test') as ctx:
ctx.transferData(len(DUMMY_PAYLOAD) // 2)
req = self.file_server.recv()
self.assertMsgEqual(CoapGet('/test', options=[
coap.Option.BLOCK2(seq_num=ctx.seq_num, block_size=ctx.block_size, has_more=0)]), req)
previous_port = self.file_server.get_remote_addr()[1]
self.file_server.reset()
self.communicate('enter-offline udp')
with self.assertRaises(socket.timeout):
self.file_server.listen(timeout_s=5)
self.communicate('exit-offline udp')
self.file_server.listen()
self.assertNotEqual(
self.file_server.get_remote_addr()[1],
previous_port)
ctx.transferData()
|
pgc02.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2019 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import gc
import os
import sys
import threading
class PGC(object):
_instance = None
def __init__(self):
self.pipe_name = 'fifo_%s#%s' % (
os.path.splitext(os.path.basename(sys.argv[0]))[0], os.getpid())
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = PGC()
return cls._instance
def start(self):
# gc.disable() doesn't work here: some 3rd-party library may enable it again
gc.set_threshold(0)
if os.path.exists(self.pipe_name):
os.unlink(self.pipe_name)
os.mkfifo(self.pipe_name)
rdthread = threading.Thread(target=self.rd_proc)
rdthread.start()
wrthread = threading.Thread(target=self.wr_proc)
wrthread.start()
def rd_proc(self):
pass
def wr_proc(self):
pass
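# With gc.set_threshold(0) the cyclic collector never runs on its own, so a
# process using this pattern is expected to call gc.collect() explicitly at
# controlled moments (for example between ticks or request batches); the
# fifo reader/writer threads above are presumably the hooks for triggering
# and reporting such collections from outside the process.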
def main():
pass
if __name__ == '__main__':
main()
|
app_utils.py
|
# From http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
import struct
import six
import collections
import cv2
import datetime
from threading import Thread
from matplotlib import colors
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
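# Minimal usage sketch for FPS (the loop body below stands in for the real
# per-frame work being timed):
def _fps_usage_example(iterations=100):
    fps = FPS().start()
    for _ in range(iterations):
        # ... grab/process a frame here ...
        fps.update()
    fps.stop()
    print("elapsed: {:.2f}s, approx. FPS: {:.2f}".format(fps.elapsed(), fps.fps()))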
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
#print(src)
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
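# Usage sketch for WebcamVideoStream: the capture loop runs in a background
# thread, so read() never blocks on camera I/O (assumes a local camera at
# index 0 and a 640x480 capture size; press 'q' to quit).
def _webcam_stream_example():
    stream = WebcamVideoStream(src=0, width=640, height=480).start()
    fps = FPS().start()
    while True:
        frame = stream.read()
        if frame is None:
            break
        cv2.imshow("frame", frame)
        fps.update()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    fps.stop()
    stream.stop()
    cv2.destroyAllWindows()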
def standard_colors():
colors = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
return colors
def color_name_to_rgb():
colors_rgb = []
for key, value in colors.cnames.items():
colors_rgb.append((key, struct.unpack('BBB', bytes.fromhex(value.replace('#', '')))))
return dict(colors_rgb)
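# e.g. color_name_to_rgb()['red'] -> (255, 0, 0)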
def draw_boxes_and_labels(
boxes,
classes,
scores,
category_index,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False):
"""Returns boxes coordinates, class names and colors
Args:
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width], can
be None
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_keypoints_map = collections.defaultdict(list)
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if scores is None:
box_to_color_map[box] = 'black'
else:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = '{}: {}%'.format(
class_name,
int(100 * scores[i]))
else:
display_str = 'score: {}%'.format(int(100 * scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = standard_colors()[
classes[i] % len(standard_colors())]
# Store all the coordinates of the boxes, class names and colors
color_rgb = color_name_to_rgb()
rect_points = []
class_names = []
class_colors = []
for box, color in six.iteritems(box_to_color_map):
ymin, xmin, ymax, xmax = box
rect_points.append(dict(ymin=ymin, xmin=xmin, ymax=ymax, xmax=xmax))
class_names.append(box_to_display_str_map[box])
class_colors.append(color_rgb[color.lower()])
return rect_points, class_names, class_colors
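# Sketch of consuming the output above with OpenCV. Box coordinates are
# normalized, so they are scaled by the frame size here; `frame`, `boxes`,
# `classes`, `scores` and `category_index` are assumed to come from an
# object-detection model, and the RGB color tuples may need swapping to BGR.
def _draw_detections_example(frame, boxes, classes, scores, category_index):
    rect_points, class_names, class_colors = draw_boxes_and_labels(
        boxes, classes, scores, category_index, min_score_thresh=.5)
    height, width = frame.shape[:2]
    for point, names, color in zip(rect_points, class_names, class_colors):
        p1 = (int(point['xmin'] * width), int(point['ymin'] * height))
        p2 = (int(point['xmax'] * width), int(point['ymax'] * height))
        cv2.rectangle(frame, p1, p2, color, 2)
        cv2.putText(frame, ', '.join(names), (p1[0], max(p1[1] - 5, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
    return frame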
|
merge_tiles.py
|
# coding=utf-8
from __future__ import print_function, unicode_literals, division
import json
import math
import os
import random
import threading
from itertools import chain, product
from logger import logger
from PIL import Image
try:
from Queue import Queue
import urlparse
from urllib import urlencode
except ImportError: # For Python 3
import urllib.parse as urlparse
from urllib.parse import urlencode
from queue import Queue
from scripts.utils import make_request, TimeoutException
VERSION = "1.0.0"
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in range(0, len(l), n):
yield l[i:i + n]
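# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]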
def has_live_threads(threads):
return any(t.is_alive() for t in threads)
def thread_download(target, xy_tile, total, thread_count=4):
result = Queue()
def task_wrapper(*args):
try:
result.put(target(*args))
except TimeoutException:
logger.warning("Waiting time exceeded")
thread_count = total // 4 if total >= thread_count else total
threads = [threading.Thread(target=task_wrapper, args=(p,)) for p in list(chunks(xy_tile, thread_count))]
for t in threads:
t.daemon = True
t.start()
while has_live_threads(threads):
try:
# join with a timeout so the main thread stays responsive to KeyboardInterrupt
[t.join(1) for t in threads
if t is not None and t.is_alive()]
except KeyboardInterrupt:
# Ctrl-C handling and send kill to threads
for t in threads:
t.kill_received = True
raise
return result
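# Usage sketch for thread_download: each worker thread processes one chunk of
# the (x, y) tile list and pushes its return value onto the shared queue (the
# dummy fetcher below just reports the chunk size it received).
def _thread_download_example():
    tiles = list(product(range(2), range(2)))
    results = thread_download(target=lambda chunk: len(chunk),
                              xy_tile=tiles, total=len(tiles))
    while not results.empty():
        print('processed chunk of size:', results.get())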
class TileMerger:
"""
:param bbox: (minLat, minLng, maxLat, maxLng)
:type bbox: tuple
"""
output_dir = 'tmp'
file_name_prefix = 'merge'
crs = 3857
stream_method = thread_download
tile_size = tuple()
image_size = tuple()
use_cache = True
def __init__(self, zoom, bbox, tile_format='.jpg', threads=1, file_name_prefix=None, output_dir=None,
with_log=True, make_request=make_request):
if output_dir:
self.output_dir = output_dir
if file_name_prefix:
self.file_name_prefix = file_name_prefix
self.with_log = with_log
self.stop = False
self.make_request = make_request
self.threads = threads
self.total = 0
self.count = 0
self.zoom = zoom
self.tile_format = tile_format
self.bbox = bbox
self.xy_range = self.set_xy_range()
self.total = self.calc_total()
self.tile_dir = self.get_tile_dir(zoom)
if not os.path.exists(self.tile_dir):
os.makedirs(self.tile_dir)
def get_tile_dir(self, zoom):
return os.path.join(self.output_dir, "%s_%s" % (self.file_name_prefix, zoom))
@staticmethod
def write_image(image, path):
with open(path, 'wb') as im:
im.write(image)
def set_xy_range(self):
if len(self.bbox) != 4:
raise Exception("Coordinate input error!")
bbox = self.bbox
keys = ("xMin", "xMax", "yMin", "yMax")
if bbox:
xy = list(chain(*map(sorted, zip(*[deg2num(l[0], l[1], self.zoom) for l in (bbox[:2], bbox[2:])]))))
return dict(zip(keys, xy))
else:
return dict.fromkeys(keys, 0)
def calc_total(self):
xy = self.xy_range
return (xy["xMax"] - xy["xMin"] + 1) * (xy["yMax"] - xy["yMin"] + 1)
def download(self):
self.log(u'Run tiles download:')
self.stop = False
if self.bbox:
self.bbox_download()
else:
self.lazy_download()
if self.count == self.total:
im = Image.open(os.path.join(self.tile_dir, os.listdir(self.tile_dir)[0]))
buffer = im.load()
self.image_size = im.size
print("")
self.log('Downloading completed. Downloaded tiles: %s' % self.count)
return self.count
@staticmethod
def stream(*args, **kwargs):
thread_download(*args, **kwargs)
def bbox_download(self):
xy = self.xy_range
p = list(product(range(xy['xMin'], xy['xMax'] + 1), range(xy['yMin'], xy['yMax'] + 1)))
self.stream(target=self.fetch_tile, xy_tile=p, total=self.total)
if self.with_log:
pass
def fetch_tile(self, porties):
for x, y in sorted(porties, key=lambda k: random.random()):
if not self.stop:
file_name = "%s_%s%s" % (x, y, self.tile_format)
file_path = os.path.join(self.tile_dir, file_name)
if not self.use_cache or not os.path.isfile(file_path):
url = self.get_url(x, y, self.zoom)
tile = self.make_request(url)
if tile:
self.write_image(tile, file_path)
self.count += 1
else:
self.count += 1
if self.with_log:
print("\r%d%% %d/%d" % ((self.count / self.total) * 100, self.count, self.total), end='')
def lazy_download(self):
row, col = True, True
x, y, count = 0, 0, 0
while row:
while col:
url_path = self.get_url(x, y, self.zoom)
tile = self.make_request(url_path)
if tile.getcode() == 200:
self.write_image(tile.read(), os.path.join(self.tile_dir, "%s_%s%s" % (x, y, self.tile_format)))
if y > self.xy_range["yMax"]:
self.xy_range["yMax"] = y
count += 1
y += 1
else:
col = False
if y == 0:
row = False
else:
self.xy_range["xMax"] = x
col, x, y = True, x + 1, 0
return count
def merge_tiles(self):
if self.count == self.total:
self.log('Merging tiles...')
xy_range = self.xy_range
filename = '%s_%d_%s%s' % (self.file_name_prefix, self.zoom,
''.join(set([str(int(g)) for g in xy_range.values()])), self.tile_format)
out = Image.new('RGB', ((xy_range["xMax"] + 1 - xy_range["xMin"]) * self.image_size[0],
(xy_range["yMax"] + 1 - xy_range["yMin"]) * self.image_size[1]))
imx = 0
for x in range(xy_range["xMin"], xy_range["xMax"] + 1):
imy = 0
for y in range(xy_range["yMin"], xy_range["yMax"] + 1):
tile_file = os.path.join(self.tile_dir, "%s_%s%s" % (x, y, self.tile_format))
tile = Image.open(tile_file)
out.paste(tile, (imx, imy))
imy += self.image_size[1]
imx += self.image_size[0]
path = os.path.join(self.output_dir, filename)
out.save(path)
# self.create_raster_worldfile(path)
# self.create_prj_file(path)
outpath = os.path.abspath(path)
self.log('Your raster: %s' % outpath)
return outpath
# def create_raster_worldfile(self, path, xy_range=None):
# from globalmaptiles import GlobalMercator
# x_y = xy_range or self.xy_range
# im = Image.open(path)
# gw_path = ''.join(os.path.split(path)[-1].split('.')[:-1])
# world_file_path = os.path.join(os.path.curdir, os.path.join(self.output_dir, "%s.jgw" % gw_path))
# with open(world_file_path, 'w') as world:
# min_y, min_x = num2deg(x_y['xMin'], x_y['yMax'] + 1, self.zoom)
# max_y, max_x = num2deg(x_y['xMax'] + 1, x_y['yMin'], self.zoom)
# gm = GlobalMercator()
# min_x, min_y = gm.LatLonToMeters(min_y, min_x)
# max_x, max_y = gm.LatLonToMeters(max_y, max_x)
# x_pixel_size = (max_x - min_x) / im.size[0]
# y_pixel_size = (max_y - min_y) / im.size[1]
# world.write(b"%f\n" % x_pixel_size) # pixel size in the x-direction in map units/pixel
# world.write(b"%f\n" % 0) # rotation about y-axis
# world.write(b"%f\n" % 0) # rotation about x-axis
# world.write(b"%f\n" % -(abs(y_pixel_size))) # pixel size in the y-direction in map units. Always negative
# world.write(b"%f\n" % min_x) # x-coordinate of the center of the upper left pixel
# world.write(b"%f\n" % max_y) # y-coordinate of the center of the upper left pixel
#
# def create_prj_file(self, path, crs=None):
# crs = crs or self.crs
# prj_str = {
# 4326: b"""
# GEOGCS["GCS_WGS_1984",DATUM["D_WGS84",SPHEROID["WGS84",6378137,298.257223563]],
# PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]
# """,
# 3857: b"""
# PROJCS["WGS_1984_Web_Mercator_Auxiliary_Sphere",GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",
# SPHEROID["WGS_1984",6378137,0]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],
# PROJECTION["Mercator"],PARAMETER["central_meridian",0],PARAMETER["standard_parallel_1",0],
# PARAMETER["false_easting",0],PARAMETER["false_northing",0],PARAMETER["Auxiliary_Sphere_Type",0],
# UNIT["Meter",1]]
# """
# }
# prj_path = ''.join(os.path.split(path)[-1].split('.')[:-1])
# prj_file_path = os.path.join(os.path.curdir, os.path.join(self.output_dir, "%s.prj" % prj_path))
# prj = open(prj_file_path, 'w')
# prj.write(prj_str[crs])
# prj.close()
def log(self, msg):
if self.with_log:
print(msg)
class UrlTileMerger(TileMerger, object):
""" Read tile from custom URL
:param url: query template 'http[s]://{s}.some_tile_service_address/{x}/{y}/{z}{f}'
{x},{y} - tile position
{z} - zoom level
{s} - subdomains
{f} - image format
"""
def __init__(self, url, **kwargs):
super(UrlTileMerger, self).__init__(**kwargs)
self.url = url
@staticmethod
def simple_url(x, y, z, url, f='.jpg'):
return url.format(**locals())
def get_url(self, x, y, z):
return self.simple_url(x, y, z, self.url, f=self.tile_format)
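# Usage sketch: merge slippy-map tiles for a bounding box at zoom 12. The URL
# template and bbox below are placeholders, not endpoints used by this module.
def _url_merger_example():
    merger = UrlTileMerger(
        url='https://tile.example.org/{z}/{x}/{y}{f}',
        zoom=12,
        bbox=(55.70, 37.50, 55.80, 37.70),
        tile_format='.png',
    )
    if merger.download():
        merger.merge_tiles()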
class BingMerger(TileMerger, object):
url = "http://t{s}.tiles.virtualearth.net/tiles/a{q}.jpeg?g=1398"
file_name_prefix = 'bing'
crs = 3857
def get_url(self, x, y, z):
return self.url.format(q=self._quad_key(x, y, z), s=random.choice([0, 1, 2, 3, 4]))
@staticmethod
def _quad_key(x, y, z):
quad_key = []
for i in range(z, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quad_key.append(str(digit))
return ''.join(quad_key)
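# The quadkey interleaves the x/y bits from the most significant zoom level
# down, e.g. BingMerger._quad_key(3, 5, 3) -> "213".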
class GoogleMerger(UrlTileMerger):
url = "http://mt1.google.com/vt/lyrs=s&x={x}&y={y}&z={z}" # "http://khm0.googleapis.com/kh?v=173&x={x}&y={y}&z={z}"
file_name_prefix = 'google'
crs = 3857
def __init__(self, bbox, zoom, **kwargs):
super(GoogleMerger, self).__init__(bbox=bbox, zoom=zoom, url=self.url, **kwargs)
class PkkAreaMerger(TileMerger, object):
file_name_prefix = 'pkk'
url = "https://pkk5.rosreestr.ru/arcgis/rest/services/Cadastre/CadastreSelected/MapServer/export"
crs = 3857
# tile_size = (300000, 300000)
tile_size = (1000, 1000)
use_cache = False
max_count = 50
def __init__(self, output_format, clear_code, **kwargs):
super(PkkAreaMerger, self).__init__(zoom=0, tile_format='.%s' % output_format,
file_name_prefix=clear_code, **kwargs)
self.file_name_prefix = clear_code.replace(":", "_")
self.output_format = output_format
self.clear_code = clear_code
self.extent = self.bbox
self.real_width = 0
self.real_height = 0
self._image_extent_list = []
self.image_extent = {}
if self.total == 1:
xy = self.xy_range
max_size = max(int(math.ceil((xy["xMax"] - xy["xMin"]))), int(math.ceil((xy["yMax"] - xy["yMin"]))))
self.tile_size = (max_size, max_size)
elif self.total > self.max_count:
self._optimize_tile_size(self.max_count)
def get_tile_dir(self, zoom):
return os.path.join(self.output_dir, "%s" % self.file_name_prefix.replace(":", "_"))
def bbox_download(self):
dx, dy = self._get_delta()
p = list(product(range(dx), range(dy)))
self.stream(target=self.fetch_tile, xy_tile=p, total=self.total)
def get_url(self, x, y, z=None):
return self.get_image_url(x, y)
def set_xy_range(self):
if len(self.bbox) != 4:
raise Exception("Coordinate input error!")
bb = self.bbox
keys = ("xMin", "xMax", "yMin", "yMax")
if bb:
return dict(zip(keys, [bb[0], bb[2], bb[1], bb[3]]))
def _get_delta(self, tile_size=False):
tile_size = tile_size if tile_size else self.tile_size
xy = self.xy_range
dx = int(math.ceil((xy["xMax"] - xy["xMin"]) / tile_size[0]))
dy = int(math.ceil((xy["yMax"] - xy["yMin"]) / tile_size[1]))
return dx, dy
def _optimize_tile_size(self, count):
h = count**0.5
xy = self.xy_range
x = int((xy["xMax"] - xy["xMin"]) / h)
y = int((xy["yMax"] - xy["yMin"]) / h)
max_value = max([x,y])
self.tile_size = [max_value, max_value]
self.total = self.calc_total()
def calc_total(self, d=False):
d = d if d else self._get_delta()
total = 1
for x in d:
total *= x
return total
def _get_bbox_by_xy(self, x, y):
bbox = self.xy_range
xMin = bbox["xMin"] + (x * self.tile_size[0])
xMax = bbox["xMin"] + ((x + 1) * self.tile_size[0])
yMin = bbox["yMin"] + (y * self.tile_size[1])
yMax = bbox["yMin"] + ((y + 1) * self.tile_size[1])
return [xMax, yMax, xMin, yMin]
def get_image_url(self, x, y):
output_format = self.output_format
if self.clear_code and self.extent:
if self.total == 1:
dx, dy = map(lambda x: x if x > 500 else 500, self.tile_size)
else:
dx, dy = self.tile_size
code = self.clear_code
layers = map(str, range(0, 20))
params = {
"dpi": 96,
"transparent": "false",
"format": "png",
"layers": "show:%s" % ",".join(layers),
"bbox": ",".join(map(str, self._get_bbox_by_xy(x, y))),
"bboxSR": 102100,
"imageSR": 102100,
"size": "%s,%s" % (dx, dy),
"layerDefs": {layer: str("ID = '%s'" % code) for layer in layers},
"f": "json"
}
if output_format:
params["format"] = output_format
url_parts = list(urlparse.urlparse(self.url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update(params)
url_parts[4] = urlencode(query)
meta_url = urlparse.urlunparse(url_parts)
if meta_url:
try:
response = self.make_request(meta_url)
data = json.loads(response)
if data.get("href"):
self._image_extent_list.append(data.get("extent"))
return meta_url.replace("f=json", "f=image")
else:
logger.warning("Can't get image meta data from: %s" % meta_url)
except Exception as er:
logger.warning(er)
elif not self.extent:
logger.warning("Can't get image without extent")
return False
def _merge_tiles(self):
dx, dy = self._get_delta()
self.log('Merging tiles...')
filename = '%s%s' % (self.file_name_prefix, self.tile_format)
tiles = []
imx = 0
imy = 0
for x in range(dx):
imy = 0
height = 0
for y in reversed(range(dy)):
tile_file = os.path.join(self.tile_dir, "%s_%s%s" % (x, y, self.tile_format))
try:
tile = Image.open(tile_file)
tiles.append((tile, (imx, imy)))
imy += tile.width
if tile.height > height:
height = tile.height
except Exception as er:
logger.warning(er)
imx += height
path = os.path.join(self.output_dir, filename)
self.real_width = imx
self.real_height = imy
out = Image.new('RGB', (self.real_width, self.real_height))
for t in tiles:
out.paste(t[0], t[1])
out.save(path)
return path
def merge_tiles(self):
if self.count == self.total:
if self.count > 1:
path = self._merge_tiles()
else:
path = os.path.join(self.tile_dir, "%s_%s%s" % (0, 0, self.tile_format))
tile = Image.open(path)
self.real_width = tile.width
self.real_height = tile.height
bb = self.bbox
xmax = max([x["xmax"] for x in self._image_extent_list])
ymax = max([x["ymax"] for x in self._image_extent_list])
self.image_extent = {"xmin": bb[0], "ymin": bb[1], "xmax": xmax, "ymax": ymax}
outpath = os.path.abspath(path)
self.log('Your raster: %s' % outpath)
return outpath
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return (xtile, ytile)
def num2deg(xtile, ytile, zoom):
"""
This returns the NW-corner of the square.
Use the function with xtile+1 and/or ytile+1 to get the other corners.
With xtile+0.5 & ytile+0.5 it will return the center of the tile.
"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
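# Round-trip sketch for the tile math: convert a lat/lon (an arbitrary
# illustrative point) to slippy-map tile indices and back to that tile's
# NW corner.
def _tile_math_example():
    lat, lon, zoom = 55.75, 37.62, 10
    x, y = deg2num(lat, lon, zoom)
    nw_lat, nw_lon = num2deg(x, y, zoom)
    print("tile:", x, y, "NW corner:", nw_lat, nw_lon)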
LAYERS = {
'Bing': BingMerger,
'Google': GoogleMerger
}
def get_available_layers():
return LAYERS
def check_bbox_str(bbox):
b = list(map(float, bbox.split()))
if len(b) != 4:
return False
return all(map(lambda x: b[x + 2] - b[x] >= 0, [0, 1]))
|
title_screen.py
|
import pygame
import socket
import errno
import threading
from button import Button
from text import Text, TextFeed
from textbox import TextBox
from message import Message
from instructions import Instruction
from cards import Deck, Card
class TitleScreen:
UPDATE_FREQUENCY = 1000
def __init__(self, screen_size=(1280, 720), title="Mongoose", clear_colour=(66, 135, 245)):
self.screen_size = screen_size
self.title = title
self.clear_colour = clear_colour
pygame.init()
self.screen = pygame.display.set_mode(screen_size, pygame.DOUBLEBUF | pygame.RESIZABLE)
pygame.display.set_caption(title)
self.clock = pygame.time.Clock()
self.__title_text = Text(title, 64, text_colour=(255, 255, 255))
self.__name_input = TextBox((0.5, 0.4), (0.4, 0.06),
Text(font_size=32, font_hierarchy=["Verdana"]),
Text("Name", font_size=32, font_hierarchy=["Verdana"], text_colour=(64, 64, 64)),
register_group="title_screen")
self.__ip_input = TextBox((0.5, 0.5), (0.4, 0.06),
Text(font_size=32, font_hierarchy=["Verdana"]),
Text("IP Address", font_size=32, font_hierarchy=["Verdana"],
text_colour=(64, 64, 64)),
register_group="title_screen")
self.__port_input = TextBox((0.5, 0.6), (0.4, 0.06),
Text(font_size=32, font_hierarchy=["Verdana"]),
Text("Port", font_size=32, font_hierarchy=["Verdana"], text_colour=(64, 64, 64)),
register_group="title_screen")
self.__join_button = Button("Join", (0.5, 0.8), (0.1, 0.08), register_group="title_screen")
self.__join_button.subscribe_event(self.join_game)
self.__status_text = Text("Status: Not connected", font_size=28,
font_hierarchy=["Verdana"], text_colour=(255, 0, 0))
self.__info_feed = TextFeed((0.85, 0.5), (0.3, 0.3))
self.client_socket = None
self.__connected_to_server = False
# self.__server_handling_thread = threading.Thread(target=self.handle_server_io, daemon=True)
# self.__server_handling_thread.start()
self.__sync_deck = None
self.__game_package = []
self.__join_game_thread = None
def run(self):
while not self.__game_package:
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.VIDEORESIZE:
self.screen_size = (event.w, event.h)
self.screen = pygame.display.set_mode(self.screen_size, pygame.DOUBLEBUF | pygame.RESIZABLE)
if event.type == pygame.QUIT:
self.quit()
TextBox.update_all("title_screen", self.screen_size, event)
mouse_pos = pygame.mouse.get_pos()
mouse_pressed = pygame.mouse.get_pressed()
Button.update_all("title_screen", self.screen_size, mouse_pos, mouse_pressed)
self.render()
self.handle_server_io()
self.clock.tick(60)
return self.__game_package
def render(self):
self.screen.fill(self.clear_colour)
self.__title_text.render(self.screen, (0.5, 0.2))
Button.render_all("title_screen", self.screen)
TextBox.render_all("title_screen", self.screen)
self.__status_text.render_from_corner(self.screen, (0.1 * self.screen_size[0], 0.8 * self.screen_size[1]))
self.__info_feed.render(self.screen)
pygame.display.flip()
def join_game(self):
if self.__join_game_thread is not None:
if self.__join_game_thread.is_alive():
return
self.__join_game_thread = threading.Thread(target=self.join_game_async)
self.__join_game_thread.start()
def join_game_async(self):
if not self.__port_input.text.isnumeric() or self.__connected_to_server:
return
ip = self.__ip_input.text
port = int(self.__port_input.text)
try:
self.__status_text.text = f"Status: Connecting to server..."
self.__status_text.text_colour = (255, 170, 0)
self.__status_text.update()
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.settimeout(10)
self.client_socket.connect((ip, port))
self.client_socket.setblocking(False)
self.__status_text.text = f"Status: Connected to {ip}:{port}. Waiting for game..."
self.__status_text.text_colour = (0, 255, 0)
self.__status_text.update()
name_message = Message.new_send_message(
f"{Instruction.SET_PROPERTY}:'name':'{self.__name_input.text}'".encode("utf-8")
)
self.client_socket.sendall(name_message.encode())
self.__connected_to_server = True
except ConnectionRefusedError:
self.__status_text.text = f"Status: Connection to {ip}:{port} failed."
self.__status_text.text_colour = (255, 0, 0)
self.__status_text.update()
except socket.timeout:
self.__status_text.text = f"Status: Connection to {ip}:{port} timed out."
self.__status_text.text_colour = (255, 0, 0)
self.__status_text.update()
def handle_server_io(self):
if not self.__connected_to_server:
return
try:
message = Message.new_recv_message()
buffer = self.client_socket.recv(Message.BUFFER_SIZE)
if not buffer:
self.__status_text.text = f"Status: Lost connection to server."
self.__status_text.text_colour = (255, 0, 0)
self.__status_text.update()
self.client_socket.close()
self.__connected_to_server = False
return
while not message.decode(buffer):
buffer = self.client_socket.recv(Message.BUFFER_SIZE)
self.decode_instruction(message.message.decode("utf-8"))
except IOError as e:
if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
self.__status_text.text = f"Error: {e}"
self.__status_text.text_colour = (255, 0, 0)
self.__status_text.update()
self.client_socket.close()
self.__connected_to_server = False
def decode_instruction(self, message):
operands = []
if ":" in message:
instruction, operand = message.split(":", 1)
in_string = False
cur_operand = ""
for c in operand:
if c == "'":
in_string = not in_string
else:
if in_string:
cur_operand += c
elif c == ":":
operands.append(cur_operand)
cur_operand = ""
operands.append(cur_operand)
else:
instruction = message
if instruction == Instruction.Update.GAME_RUNNING:
self.__status_text.text = f"Status: Game already running on server."
self.__status_text.text_colour = (255, 170, 0)
self.__status_text.update()
self.client_socket.close()
self.__connected_to_server = False
if instruction == Instruction.START_GAME:
active_id = int(operands[0])
players = []
_p = []
for i, o in enumerate(operands[1:]):
# even: name, odd: id
if i % 2 == 0:
_p = [o]
else:
_p.append(int(o))
players.append(_p)
self.start_game(active_id, sorted(players, key=lambda x: x[1]))
if instruction == Instruction.Update.PLAYER_JOINED:
assert len(operands) == 1
self.__info_feed.add_line(f"Player {operands[0]} joined the game.")
if instruction == Instruction.Game.SEND_DECK:
assert len(operands) == 52
suit_map = {"0": "Spades", "1": "Diamonds", "2": "Clubs", "3": "Hearts"}
cards = []
for card in operands:
s, v = card.split("-")
cards.append(Card(suit_map[s], int(v)))
self.__sync_deck = Deck(cards)
def start_game(self, active_id, players):
self.__game_package = [active_id, players, self.client_socket, self.__sync_deck]
def quit(self):
if self.__connected_to_server:
self.client_socket.sendall(Message.new_send_message(Instruction.Update.QUIT_GAME.encode("utf-8")).encode())
# self.__server_handling_thread.join(0.5)
pygame.quit()
quit()
|
test_session.py
|
import os
import threading
import time
import socket
import importlib
from six.moves.http_client import HTTPConnection
import pytest
from path import Path
import cherrypy
from cherrypy._cpcompat import (
json_decode,
HTTPSConnection,
)
from cherrypy.lib import sessions
from cherrypy.lib import reprconf
from cherrypy.lib.httputil import response_codes
from cherrypy.test import helper
localDir = os.path.dirname(__file__)
def http_methods_allowed(methods=['GET', 'HEAD']):
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ', '.join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed)
def setup_server():
@cherrypy.config(**{
'tools.sessions.on': True,
'tools.sessions.storage_class': sessions.RamSession,
'tools.sessions.storage_path': localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
})
class Root:
@cherrypy.expose
def clear(self):
cherrypy.session.cache.clear()
@cherrypy.expose
def data(self):
cherrypy.session['aha'] = 'foo'
return repr(cherrypy.session._data)
@cherrypy.expose
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
@cherrypy.expose
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
@cherrypy.expose
@cherrypy.config(**{'tools.sessions.on': False})
def set_session_cls(self, new_cls_name):
new_cls = reprconf.attributes(new_cls_name)
cfg = {'tools.sessions.storage_class': new_cls}
self.__class__._cp_config.update(cfg)
if hasattr(cherrypy, 'session'):
del cherrypy.session
if new_cls.clean_thread:
new_cls.clean_thread.stop()
new_cls.clean_thread.unsubscribe()
del new_cls.clean_thread
@cherrypy.expose
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
@cherrypy.expose
def keyin(self, key):
return str(key in cherrypy.session)
@cherrypy.expose
def delete(self):
cherrypy.session.delete()
sessions.expire()
return 'done'
@cherrypy.expose
def delkey(self, key):
del cherrypy.session[key]
return 'OK'
@cherrypy.expose
def redir_target(self):
return self._cp_config['tools.sessions.storage_class'].__name__
@cherrypy.expose
def iredir(self):
raise cherrypy.InternalRedirect('/redir_target')
@cherrypy.expose
@cherrypy.config(**{
'tools.allow.on': True,
'tools.allow.methods': ['GET'],
})
def restricted(self):
return cherrypy.request.method
@cherrypy.expose
def regen(self):
cherrypy.tools.sessions.regenerate()
return 'logged in'
@cherrypy.expose
def length(self):
return str(len(cherrypy.session))
@cherrypy.expose
@cherrypy.config(**{
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False,
})
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
cherrypy.tree.mount(Root())
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def tearDown(self):
# Clean up sessions.
for fname in os.listdir(localDir):
if fname.startswith(sessions.FileSession.SESSION_PREFIX):
path = Path(localDir) / fname
path.remove_p()
@pytest.mark.xfail(reason='#1534')
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
self.assertBody("{'aha': 'foo'}")
c = self.cookies[0]
self.getPage('/data', self.cookies)
self.assertEqual(self.cookies[0], c)
self.getPage('/testStr')
self.assertBody('1')
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is an 'expires' param
self.assertEqual(set(cookie_parts.keys()),
set(['session_id', 'expires', 'Path']))
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/data', self.cookies)
self.assertDictEqual(json_decode(self.body),
{'counter': 3, 'aha': 'foo'})
self.getPage('/length', self.cookies)
self.assertBody('2')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
self.assertBody('1')
self.getPage('/length', self.cookies)
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
self.assertBody('2')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
self.getPage('/delete', cookieset1)
self.assertBody('done')
def f():
return [
x
for x in os.listdir(localDir)
if x.startswith('session-')
]
self.assertEqual(f(), [])
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
self.assertNotEqual(f(), [])
time.sleep(2)
self.assertEqual(f(), [])
def test_1_Ram_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self._test_Concurrency()
@pytest.mark.xfail(reason='#1306')
def test_2_File_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
for e in errors:
print(e)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('FileSession')
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
path = os.path.join(localDir, 'session-' + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/regen')
self.assertBody('logged in')
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/testStr',
headers=[
('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.assertNotEqual(id1, id2)
self.assertNotEqual(id2, 'maliciousid')
def test_7_session_cookies(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
id1 = cookie_parts['temp']
self.assertEqual(list(sessions.RamSession.cache), [id1])
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
self.assertBody(id1)
self.assertEqual(list(sessions.RamSession.cache), [id1])
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
# Assert a new id has been generated...
id2 = cookie_parts['temp']
self.assertNotEqual(id1, id2)
self.assertEqual(set(sessions.RamSession.cache.keys()),
set([id1, id2]))
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = list(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail('The second session did not time out.')
else:
self.fail('Unknown session id in cache: %r' % cache)
def test_8_Ram_Cleanup(self):
def lock():
s1 = sessions.RamSession()
s1.acquire_lock()
time.sleep(1)
s1.release_lock()
t = threading.Thread(target=lock)
t.start()
start = time.time()
while not sessions.RamSession.locks and time.time() - start < 5:
time.sleep(0.01)
assert len(sessions.RamSession.locks) == 1, 'Lock not acquired'
s2 = sessions.RamSession()
s2.clean_up()
msg = 'Clean up should not remove active lock'
assert len(sessions.RamSession.locks) == 1, msg
t.join()
try:
importlib.import_module('memcache')
host, port = '127.0.0.1', 11211
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
raise
break
except (ImportError, socket.error):
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test(self):
return self.skip('memcached not reachable ')
else:
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.MemcachedSession')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
self.assertInBody('NotImplementedError')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody('True')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody('done')
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
self.assertBody('1')
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage('/', cookies)
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody('memcached')
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(
404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
preprocessor-simulator.py
|
#!/usr/bin/env python
'''
preprocessor-simulator.py
Simulate a receiver with built-in preprocessor. This allows testing of the
light controller functionality without hooking up an RC system.
A web browser is used for the user interface
Author: Werner Lane
E-mail: laneboysrc@gmail.com
'''
from __future__ import print_function
import sys
import os
import argparse
import serial
import time
import threading
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from http.server import BaseHTTPRequestHandler, HTTPServer
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
SLAVE_MAGIC_BYTE = 0x87
HTML_FILE = "preprocessor-simulator.html"
class QuietBaseHTTPRequestHandler(BaseHTTPRequestHandler):
def log_request(self, code, message=None):
''' Suppress logging of HTTP requests '''
pass
def parse_commandline():
''' Simulate a receiver with built-in preprocessor '''
parser = argparse.ArgumentParser(
description="Simulate a receiver with built-in preprocessor.")
parser.add_argument("-b", "--baudrate", type=int, default=115200,
help='Baudrate to use. Default is 115200.')
parser.add_argument("-3", "--force-3ch", action='store_true',
help='Force 3-channel preprocessor support')
parser.add_argument("-5", "--force-5ch", action='store_true',
help='Force 5-channel preprocessor support')
parser.add_argument("-p", "--port", type=int, default=1234,
help='HTTP port for the web UI. Default is localhost:1234.')
parser.add_argument("-u", "--usb", "--webusb", action='store_true',
help='Use WebUSB to connect to the light controller instead of a serial port')
parser.add_argument("tty", nargs="?", default="/dev/ttyUSB0",
help="Serial port to use. ")
return parser.parse_args()
class CustomHTTPRequestHandler(QuietBaseHTTPRequestHandler):
''' Request handler that implements our simple web based API '''
def do_GET(self):
''' GET request handler '''
self.send_response(200)
self.send_header('Content-type', 'text/html')
if not self.server.preprocessor.args.usb:
self.send_header('Set-Cookie', 'mode=xhr;max-age=1')
self.end_headers()
html_path = os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])),
HTML_FILE)
with open(html_path, "r") as html_file:
self.wfile.write(html_file.read().encode("UTF-8"))
return
def do_POST(self):
''' POST request handler '''
query = self.rfile.read(
int(self.headers['Content-Length'])).decode("UTF-8")
try:
query = parse_qs(query, strict_parsing=True)
except ValueError:
self.send_response(400)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("Bad querystring")
else:
response, content, config = self.server.preprocessor.api(query)
self.send_response(response)
self.send_header('Content-type', 'text/html')
self.send_header('laneboysrc-config', config)
self.end_headers()
self.wfile.write(content.encode('UTF-8'))
return
class PreprocessorApp(object):
''' Simulate a RC receiver with preprocessor '''
def __init__(self):
self.args = parse_commandline()
self.receiver = {
'ST': 0, 'TH': 0, 'CH3': 0, 'AUX': 0, 'AUX2': 0, "AUX3": 0,
'STARTUP_MODE': 1, 'PING' : 0}
self.read_thread = None
self.write_thread = None
self.done = False
self.config = ''
if self.args.force_5ch and self.args.force_3ch:
print("Error: 3 and 5 channel support can not be activated simultanously")
sys.exit(1)
if self.args.force_5ch:
self.protocol = '5'
self.args.multi_aux = True
print("Manually setting 5-channel support")
elif self.args.force_3ch:
self.protocol = '3'
self.args.multi_aux = False
print("Manually setting 3-channel support")
else:
self.protocol = 'A'
self.args.multi_aux = False
if self.args.usb:
return
try:
self.uart = serial.Serial(self.args.tty, self.args.baudrate)
except serial.SerialException as error:
print("Unable to open port %s: %s" % (self.args.tty, error))
sys.exit(1)
print("Simulating on {uart} at {baudrate} baud.".format(
uart=self.uart.port, baudrate=self.uart.baudrate))
def api(self, query):
''' Web api handler '''
config = '{} {}'.format(self.protocol, self.config)
for key, value in query.items():
if key in self.receiver:
self.receiver[key] = int(value[0])
else:
return 400, "Bad request", config
return 200, 'OK', config
def reader(self):
''' Background thread performing the UART read '''
time_of_last_line = start_time = time.time()
print(" TOTAL DIFFERENCE RESPONSE")
print("---------- ---------- --------")
while not self.done:
self.uart.timeout = 0.1
try:
data = self.uart.readline()
except serial.SerialException as error:
print("Reading from serial port failed: %s" % error)
self.errorShutdown()
return
if data:
message = data.decode('ascii', errors='replace')
if message.startswith('CONFIG'):
self.config = message.rstrip()
if self.protocol == 'A':
self.args.multi_aux = (self.config.split(' ')[1] != '0')
current_time = time.time()
time_difference = current_time - time_of_last_line
elapsed_time = current_time - start_time
print("%10.3f %10.3f %s" % (elapsed_time,
time_difference,
message),
end='')
time_of_last_line = current_time
def writer(self):
''' Background thread performing the UART transmission '''
while not self.done:
steering = self.receiver['ST']
if steering < 0:
steering = 256 + steering
throttle = self.receiver['TH']
if throttle < 0:
throttle = 256 + throttle
mode_byte = 0
if self.receiver['CH3']:
mode_byte += 0x01
if self.receiver['STARTUP_MODE']:
mode_byte += 0x10
if self.args.multi_aux:
mode_byte += 0x08
data = bytearray(
[SLAVE_MAGIC_BYTE, steering, throttle, mode_byte])
if self.args.multi_aux:
aux = self.receiver['AUX']
if aux < 0:
aux = 256 + aux
aux2 = self.receiver['AUX2']
if aux2 < 0:
aux2 = 256 + aux2
aux3 = self.receiver['AUX3']
if aux3 < 0:
aux3 = 256 + aux3
data.extend([aux, aux2, aux3])
try:
self.uart.write(data)
self.uart.flush()
except serial.SerialException as error:
print("Writing to serial port failed: %s" % error)
self.errorShutdown()
return
time.sleep(0.02)
def run(self):
''' Send the test patterns to the TLC5940 based slave '''
self.server = HTTPServer(('', self.args.port), CustomHTTPRequestHandler)
self.server.preprocessor = self
print("Please call up the user interface on localhost:{port}".format(
port=self.args.port))
if not self.args.usb:
self.read_thread = threading.Thread(target=self.reader, name='rx')
self.read_thread.daemon = True
self.read_thread.start()
self.write_thread = threading.Thread(target=self.writer, name='tx')
self.write_thread.daemon = True
self.write_thread.start()
self.server.serve_forever()
def shutdown(self):
''' Shut down the application, wait for the uart thread to finish '''
self.done = True
if not self.write_thread is None:
self.write_thread.join()
if not self.read_thread is None:
self.read_thread.join()
def errorShutdown(self):
''' Shut down the application in case an error occurred '''
self.done = True
self.uart.close()
self.server.shutdown()
sys.exit(1)
def main():
''' Program start '''
app = PreprocessorApp()
try:
app.run()
except KeyboardInterrupt:
print("")
app.shutdown()
sys.exit(0)
if __name__ == '__main__':
main()
'''
Architecture discussion: 3ch vs 5ch mode
========================================
Since August 2019 the preprocessor-simulator supports both the old 3-channel
preprocessor protocol and the extended 5-channel version.
With the introduction of the additional channels we also have to deal now
with the added complexity of configurable AUX controls (2-position switch,
3-position switch, etc.)
From a user perspective we want to support the following modes:
- If a new light controller firmware is connected that sends the CONFIG
information via UART debug then the preprocessor-simulator should switch
the protocol, as well as the various AUX control types, automatically
according to the values of CONFIG.
This means for 3-channel operation only one AUX channel is shown, and the
selection of the switch type (2-position switch, 2-position with up/down
button, and momentary) is automatic (drop-down disabled)
For 5-channel operation all three AUX channels are shown, and the selection
of the switch type (including 3-position and analog) are automatic (drop-down
disabled)
- If a light controller firmware is connected that does not send CONFIG
information then the preprocessor-simulator assumes the 3-channel protocol,
and allows the user to select the switch type (but only 2-position switch,
2-position with up/down button, and momentary)
- If the -3 command line parameter is given the preprocessor-simulator forces
the 3-channel protocol and UI regardless of CONFIG information. The drop-down
is enabled and allows the user to select the switch type (but only 2-position switch,
2-position with up/down button, and momentary)
- If the -5 command line parameter is given the preprocessor-simulator forces
the 5-channel protocol and allows the user to select all possible switch types
manually.
It is important to note that the application is split between two code-bases
(Python and JavaScript), which need to be kept in sync. Ideally we only have
one source of truth.
Since the Python part processes both UART and command line parameters, it is the
logical choice for storing the actual configuration to use.
The Python code is not able to send information to the JavaScript whenever it
wants; the JavaScript has to call the Python API via HTTP POST and then Python
can return information in the HTTP response. Since the JavaScript part pings
the Python part at least every 3 seconds, the maximum latency of configuration
changes is 3 seconds.
The Python API passes configuration information via a custom HTTP header named
'laneboysrc-config' containing a text string as follows:
A [CONFIG 1 2 3 4 5 6 7 8 9]
Each element is separated by a single space. The optional CONFIG ... part is
only present when the light controller sends the information via its debug port.
It is a verbatim copy of the debug string starting with CONFIG.
A: A=automatic (depends on CONFIG presence/values); 3=force 3ch; 5=force 5ch
CONFIG: The string literal CONFIG
1: config.flags2.multi_aux (0 or 1)
2: config.flags.ch3_is_momentary (0 or 1)
3: config.flags.ch3_is_two_button (0 or 1)
4: config.aux_type
5: config.aux_function
6: config.aux2_type
7: config.aux2_function
8: config.aux3_type
9: config.aux3_function
'''
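# Hedged sketch (this helper is hypothetical and not part of the original tool):
# one way a consumer of the 'laneboysrc-config' HTTP header described above
# could split it back into the protocol flag and the optional CONFIG values.
# The dictionary keys follow the numbered field list in the comment block.
def parse_laneboysrc_config(header_value):
    parts = header_value.split()
    result = {'protocol': parts[0]}  # 'A' = automatic, '3'/'5' = forced protocol
    if len(parts) > 1 and parts[1] == 'CONFIG':
        keys = ('multi_aux', 'ch3_is_momentary', 'ch3_is_two_button',
                'aux_type', 'aux_function', 'aux2_type', 'aux2_function',
                'aux3_type', 'aux3_function')
        result.update(zip(keys, parts[2:11]))
    return result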
|
custom.py
|
import sublime
import threading
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ..runtime import enqueue_on_worker
from ..ui_mixins.input_panel import show_single_line_input_panel
from ..view import replace_view_content
from ...common import util
__all__ = (
"gs_custom",
)
class gs_custom(WindowCommand, GitCommand):
"""
Run the specified custom command asynchronously.
"""
def run(self, **kwargs):
args = kwargs.get('args')
if not args:
sublime.error_message("Custom command must provide args.")
return
# prompt for custom command argument
if '{PROMPT_ARG}' in args:
prompt_msg = kwargs.pop("prompt_msg", "Command argument: ")
return show_single_line_input_panel(
prompt_msg,
"",
lambda arg: self.run_impl(custom_argument=arg, **kwargs)
)
self.run_impl(**kwargs)
def run_impl(
self,
output_to_panel=False,
output_to_buffer=False,
args=None,
start_msg="Starting custom command...",
complete_msg="Completed custom command.",
syntax=None,
run_in_thread=False,
custom_argument=None,
custom_environ=None,
):
for idx, arg in enumerate(args):
if arg == "{REPO_PATH}":
args[idx] = self.repo_path
elif arg == "{FILE_PATH}":
args[idx] = self.file_path
elif arg == "{PROMPT_ARG}":
args[idx] = custom_argument
def program():
self.window.status_message(start_msg)
stdout = self.git(*args, custom_environ=custom_environ)
self.window.status_message(complete_msg)
if output_to_panel:
util.log.panel(stdout)
if output_to_buffer:
view = self.window.new_file()
view.set_scratch(True)
if syntax:
view.set_syntax_file(syntax)
replace_view_content(view, stdout.replace("\r", "\n"))
util.view.refresh_gitsavvy_interfaces(self.window)
if run_in_thread:
threading.Thread(target=program, daemon=True).start()
else:
enqueue_on_worker(program)
|
val.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset
Usage:
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import box_iou, coco80_to_coco91_class, colorstr, check_dataset, check_img_size, \
check_requirements, check_suffix, check_yaml, increment_path, non_max_suppression, print_args, scale_coords, \
xyxy2xywh, xywh2xyxy, LOGGER
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync
from utils.callbacks import Callbacks
def save_one_txt(predn, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
def save_one_json(predn, jdict, path, class_map):
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': class_map[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
def process_batch(detections, labels, iouv):
"""
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
Returns:
correct (Array[N, 10]), for 10 IoU levels
"""
correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
# matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
matches = torch.Tensor(matches).to(iouv.device)
correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
return correct
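# Hedged usage sketch (the tensors below are illustrative, not taken from run()):
# with the same 10-element IoU vector built later in run(), process_batch()
# returns one boolean row per detection marking which IoU thresholds it meets
# against a ground-truth box of the same class.
def _process_batch_example():
    iouv = torch.linspace(0.5, 0.95, 10)
    detections = torch.tensor([[10., 10., 50., 50., 0.9, 0.]])  # x1, y1, x2, y2, conf, class
    labels = torch.tensor([[0., 12., 12., 48., 48.]])           # class, x1, y1, x2, y2
    return process_batch(detections, labels, iouv)              # shape (1, 10), dtype bool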
@torch.no_grad()
def run(data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
single_cls=False, # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project=ROOT / 'runs/val', # save to project/name
name='exp', # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
model=None,
dataloader=None,
save_dir=Path(''),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
check_suffix(weights, '.pt')
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check image size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Data
data = check_dataset(data) # check
# Half
half &= device.type != 'cpu' # half precision only supported on CUDA
model.half() if half else model.float()
# Configure
model.eval()
is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
pad = 0.0 if task == 'speed' else 0.5
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
t1 = time_sync()
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
t2 = time_sync()
dt[0] += t2 - t1
# Run model
out, train_out = model(img, augment=augment) # inference and training outputs
dt[1] += time_sync() - t2
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t3 = time_sync()
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
dt[2] += time_sync() - t3
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
else:
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
if save_json:
save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
callbacks.run('on_val_image_end', pred, predn, path, names, img[si])
# Plot images
if plots and batch_i < 3:
f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.run('on_val_end')
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements(['pycocotools'])
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
LOGGER.info(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
opt.save_json |= opt.data.endswith('coco.yaml')
opt.save_txt |= opt.save_hybrid
print_args(FILE.stem, opt)
return opt
def main(opt):
check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
run(**vars(opt))
elif opt.task == 'speed': # speed benchmarks
# python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45,
device=opt.device, save_json=False, plots=False)
elif opt.task == 'study': # run over a range of settings and save/plot
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
LOGGER.info(f'\nRunning {f} point {i}...')
r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_val_study(x=x) # plot
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
dataset.py
|
import pandas as pd
import numpy as np
import os
import librosa
from queue import Queue
from threading import Thread
from typing import Tuple
DOC_PATH = 'alc_original/DOC/IS2011CHALLENGE'
DATA_PATH = 'alc_original'
TRAIN_TABLE = 'TRAIN.TBL'
D1_TABLE = 'D1.TBL'
D2_TABLE = 'D2.TBL'
TEST_TABLE = 'TESTMAPPING.txt'
SR = 16000
class ALCDataset:
def __init__(self, path):
self.dataset_path = path
self.__load_meta_file()
def __process_meta(self, meta):
meta['file_name'] = meta['file_name'].map(lambda x: x[x.find('/') + 1:].lower())
meta['file_name'] = meta['file_name'].map(lambda x: x[:-8] + 'm' + x[-7:])
meta['session'] = meta['file_name'].map(lambda x: x[:x.find('/')])
meta['label'] = meta['user_state'].map(lambda x: 1 if x == 'I' else 0)
return meta
def __load_meta_file(self):
"""Load meta file.
:return: None
"""
assert os.path.exists(self.dataset_path)
doc_folder = os.path.join(self.dataset_path, DOC_PATH)
train_meta_path = os.path.join(doc_folder, TRAIN_TABLE)
self.train_meta = pd.read_csv(train_meta_path, sep='\t', names=['file_name', 'bac', 'user_state'])
self.train_meta = self.__process_meta(self.train_meta)
d1_meta_path = os.path.join(doc_folder, D1_TABLE)
self.d1_meta = pd.read_csv(d1_meta_path, sep='\t', names=['file_name', 'bac', 'user_state'])
self.d1_meta = self.__process_meta(self.d1_meta)
d2_meta_path = os.path.join(doc_folder, D2_TABLE)
self.d2_meta = pd.read_csv(d2_meta_path, sep='\t', names=['file_name', 'bac', 'user_state'])
self.d2_meta = self.__process_meta(self.d2_meta)
test_meta_path = os.path.join(doc_folder, TEST_TABLE)
self.test_meta = pd.read_csv(test_meta_path, sep='\t',
names=['file_name', 'bac', 'user_state', 'test_file_name'])
self.test_meta = self.test_meta[['file_name', 'bac', 'user_state']]
self.test_meta = self.__process_meta(self.test_meta)
def __load_wav(self, path):
audio, _ = librosa.load(path, sr=SR)
return audio
def load_data(self, split, percentage=0.2, num_threads=4):
split = split.lower()
assert split in ('train', 'd1', 'd2', 'test')
assert 0 <= percentage <= 1
meta = getattr(self, f'{split}_meta')
# Only load part of the dataset to avoid OOM
partial_meta = meta[:int(len(meta) * percentage)]
audios_list = [{} for _ in range(num_threads)]
q = Queue()
def load(q, audios, dataset_path, data_path, i):
while not q.empty():
if i == 0:
print(f'{q.qsize():05d} left.', end='\r')
path = q.get()
audio_path = os.path.join(dataset_path, data_path, path)
audios[path] = self.__load_wav(audio_path)
q.task_done()
return True
for file_name in partial_meta['file_name']:
q.put(file_name)
for i in range(num_threads):
worker = Thread(target=load, args=(q, audios_list[i], self.dataset_path, DATA_PATH, i))
worker.daemon = True
worker.start()
q.join()
audios = {}
for i in range(num_threads):
audios.update(audios_list[i])
data = []
for file_name in partial_meta['file_name']:
data.append(audios[file_name])
label = meta['label'].to_numpy()
return data, label
|
train.py
|
# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""Train"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from datetime import datetime
import os.path
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
import threading
from config import *
from dataset import pascal_voc, kitti
from utils.util import sparse_to_dense, bgr_to_rgb, bbox_transform
from nets import *
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'KITTI',
"""Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train',
""" Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
"""VOC challenge year. 2007 or 2012"""
"""Only used for Pascal VOC dataset""")
tf.app.flags.DEFINE_string('train_dir', '/media/scott/ubuntusoftware/squeezeDetplus9+DATA/logs/squeezedet/',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 140001,
"""Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeDet',
"""Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
"""Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 10,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
#tf.app.flags.DEFINE_float('gpu_memory_fraction', 0.1, 'GPU memory fraction to use.')
def _draw_box(im, box_list, label_list, color=(0,255,0), cdict=None, form='center'):
assert form == 'center' or form == 'diagonal', \
'bounding box format not accepted: {}.'.format(form)
for bbox, label in zip(box_list, label_list):
if form == 'center':
bbox = bbox_transform(bbox)
xmin, ymin, xmax, ymax = [int(b) for b in bbox]
l = label.split(':')[0] # text before "CLASS: (PROB)"
if cdict and l in cdict:
c = cdict[l]
else:
c = color
if(l == 'car'):
# draw box
cv2.rectangle(im, (xmin, ymin), (xmax, ymax), (255,0,0), 1)
# draw label
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(im, label, (xmin, ymin), font, 0.3, (255,0,0), 1)
if(l == 'pedestrian'):
# draw box
cv2.rectangle(im, (xmin, ymin), (xmax, ymax), (0,255,0), 1)
# draw label
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(im, label, (xmin, ymin), font, 0.3, (0,255,0), 1)
if(l == 'cyclist'):
# draw box
cv2.rectangle(im, (xmin, ymin), (xmax, ymax), (0,0,255), 1)
# draw label
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(im, label, (xmin, ymin), font, 0.3, (0,0,255), 1)
def _viz_prediction_result(model, images, bboxes, labels, batch_det_bbox,
batch_det_class, batch_det_prob):
mc = model.mc
for i in range(len(images)):
# draw ground truth
#_draw_box(
# images[i], bboxes[i],
# [mc.CLASS_NAMES[idx] for idx in labels[i]],
# (0, 255, 0))
# draw prediction
det_bbox, det_prob, det_class = model.filter_prediction(
batch_det_bbox[i], batch_det_prob[i], batch_det_class[i])
keep_idx = [idx for idx in range(len(det_prob)) \
if det_prob[idx] > mc.PLOT_PROB_THRESH]
det_bbox = [det_bbox[idx] for idx in keep_idx]
det_prob = [det_prob[idx] for idx in keep_idx]
det_class = [det_class[idx] for idx in keep_idx]
_draw_box(
images[i], det_bbox,
[mc.CLASS_NAMES[idx]+': (%.2f)'% prob \
for idx, prob in zip(det_class, det_prob)],
(0, 0, 255))
def train():
"""Train SqueezeDet model"""
assert FLAGS.dataset == 'KITTI', \
'Currently only support KITTI dataset'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
with tf.Graph().as_default():
assert FLAGS.net == 'vgg16' or FLAGS.net == 'resnet50' \
or FLAGS.net == 'squeezeDet' or FLAGS.net == 'fire-FRD-CNN', \
'Selected neural net architecture not supported: {}'.format(FLAGS.net)
if FLAGS.net == 'vgg16':
mc = kitti_vgg16_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = VGG16ConvDet(mc)
elif FLAGS.net == 'resnet50':
mc = kitti_res50_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = ResNet50ConvDet(mc)
elif FLAGS.net == 'squeezeDet':
mc = kitti_squeezeDet_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDet(mc)
elif FLAGS.net == 'fire-FRD-CNN':
mc = kitti_squeezeDetPlus_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDetPlus(mc)
imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)
# save model size, flops, activations by layers
with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'), 'w') as f:
f.write('Number of parameter by layer:\n')
count = 0
for c in model.model_size_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nActivation size by layer:\n')
for c in model.activation_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nNumber of flops by layer:\n')
for c in model.flop_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
f.close()
print ('Model statistics saved to {}.'.format(
os.path.join(FLAGS.train_dir, 'model_metrics.txt')))
def _load_data(load_to_placeholder=True):
# read batch input
image_per_batch, label_per_batch, box_delta_per_batch, aidx_per_batch, \
bbox_per_batch = imdb.read_batch()
label_indices, bbox_indices, box_delta_values, mask_indices, box_values, \
= [], [], [], [], []
aidx_set = set()
num_discarded_labels = 0
num_labels = 0
for i in range(len(label_per_batch)): # batch_size
for j in range(len(label_per_batch[i])): # number of annotations
num_labels += 1
if (i, aidx_per_batch[i][j]) not in aidx_set:
aidx_set.add((i, aidx_per_batch[i][j]))
label_indices.append(
[i, aidx_per_batch[i][j], label_per_batch[i][j]])
mask_indices.append([i, aidx_per_batch[i][j]])
bbox_indices.extend(
[[i, aidx_per_batch[i][j], k] for k in range(4)])
box_delta_values.extend(box_delta_per_batch[i][j])
box_values.extend(bbox_per_batch[i][j])
else:
num_discarded_labels += 1
if mc.DEBUG_MODE:
print ('Warning: Discarded {}/({}) labels that are assigned to the same '
'anchor'.format(num_discarded_labels, num_labels))
if load_to_placeholder:
image_input = model.ph_image_input
input_mask = model.ph_input_mask
box_delta_input = model.ph_box_delta_input
box_input = model.ph_box_input
labels = model.ph_labels
else:
image_input = model.image_input
input_mask = model.input_mask
box_delta_input = model.box_delta_input
box_input = model.box_input
labels = model.labels
feed_dict = {
image_input: image_per_batch,
input_mask: np.reshape(
sparse_to_dense(
mask_indices, [mc.BATCH_SIZE, mc.ANCHORS],
[1.0]*len(mask_indices)),
[mc.BATCH_SIZE, mc.ANCHORS, 1]),
box_delta_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_delta_values),
box_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_values),
labels: sparse_to_dense(
label_indices,
[mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES],
[1.0]*len(label_indices)),
}
return feed_dict, image_per_batch, label_per_batch, bbox_per_batch
def _enqueue(sess, coord):
try:
while not coord.should_stop():
feed_dict, _, _, _ = _load_data()
sess.run(model.enqueue_op, feed_dict=feed_dict)
if mc.DEBUG_MODE:
print ("added to the queue")
if mc.DEBUG_MODE:
print ("Finished enqueue")
except Exception as e:
coord.request_stop(e)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
# sess = tf.Session(config=config)
saver = tf.train.Saver(tf.global_variables())
summary_op = tf.summary.merge_all()
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
coord = tf.train.Coordinator()
if mc.NUM_THREAD > 0:
enq_threads = []
for _ in range(mc.NUM_THREAD):
enq_thread = threading.Thread(target=_enqueue, args=[sess, coord])
# enq_thread.isDaemon()
enq_thread.start()
enq_threads.append(enq_thread)
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
run_options = tf.RunOptions(timeout_in_ms=60000)
# try:
for step in xrange(FLAGS.max_steps):
if coord.should_stop():
sess.run(model.FIFOQueue.close(cancel_pending_enqueues=True))
coord.request_stop()
coord.join(threads)
break
start_time = time.time()
if step % FLAGS.summary_step == 0:
feed_dict, image_per_batch, label_per_batch, bbox_per_batch = \
_load_data(load_to_placeholder=False)
op_list = [
model.train_op, model.loss, summary_op, model.det_boxes,
model.det_probs, model.det_class, model.conf_loss,
model.bbox_loss, model.class_loss
]
_, loss_value, summary_str, det_boxes, det_probs, det_class, \
conf_loss, bbox_loss, class_loss = sess.run(
op_list, feed_dict=feed_dict)
_viz_prediction_result(
model, image_per_batch, bbox_per_batch, label_per_batch, det_boxes,
det_class, det_probs)
image_per_batch = bgr_to_rgb(image_per_batch)
viz_summary = sess.run(
model.viz_op, feed_dict={model.image_to_show: image_per_batch})
summary_writer.add_summary(summary_str, step)
summary_writer.add_summary(viz_summary, step)
summary_writer.flush()
print ('conf_loss: {}, bbox_loss: {}, class_loss: {}'.
format(conf_loss, bbox_loss, class_loss))
else:
if mc.NUM_THREAD > 0:
_, loss_value, conf_loss, bbox_loss, class_loss = sess.run(
[model.train_op, model.loss, model.conf_loss, model.bbox_loss,
model.class_loss], options=run_options)
else:
feed_dict, _, _, _ = _load_data(load_to_placeholder=False)
_, loss_value, conf_loss, bbox_loss, class_loss = sess.run(
[model.train_op, model.loss, model.conf_loss, model.bbox_loss,
model.class_loss], feed_dict=feed_dict)
duration = time.time() - start_time
assert not np.isnan(loss_value), \
'Model diverged. Total loss: {}, conf_loss: {}, bbox_loss: {}, ' \
'class_loss: {}'.format(loss_value, conf_loss, bbox_loss, class_loss)
if step % 10 == 0:
num_images_per_step = mc.BATCH_SIZE
images_per_sec = num_images_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
images_per_sec, sec_per_batch))
sys.stdout.flush()
# Save the model checkpoint periodically.
if step % FLAGS.checkpoint_step == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
# except Exception, e:
# coord.request_stop(e)
# finally:
# coord.request_stop()
# coord.join(threads)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
miner.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
## Copyright (c) 2017, The Sumokoin Project (www.sumokoin.org)
'''
Main miner worker, RPC client
'''
import sys, psutil
import json, socket, struct
import errno
import threading, time, urlparse, random, platform
from multiprocessing import Process, Event, cpu_count
#from threading import Timer
from libs import cpu_has_aes_in_supported, cryptolite_hash, cryptonite_hash
import settings
from utils.logger import log, LEVEL_DEBUG, LEVEL_ERROR, LEVEL_INFO, LEVEL_PROTOCOL
POOL_ERROR_MSGS = ["Unauthenticated", "Timeout", "Invalid job id"]
HAS_AES_NI = cpu_has_aes_in_supported() # mark if CPU has AES-NI supported
CPU_COUNT = cpu_count()
MAX_INT = 0xffffffff
# Convert from/to binary and hexadecimal strings
# (could be replaced with .encode('hex') and .decode('hex'))
from binascii import hexlify, unhexlify
NETWORK_ERROR_MSG = "Network error! Reconnecting..."
def human_readable_hashrate(hashrate):
'''Returns a human readable representation of hashrate.'''
if hashrate < 1000:
return '%.2f H/s' % hashrate
if hashrate < 10000000:
return '%.2f kH/s' % (hashrate / 1000)
if hashrate < 10000000000:
return '%.2f MH/s' % (hashrate / 1000000)
return '%.2f GH/s' % (hashrate / 1000000000)
""" decode 256-bit target value """
def decode_target_value(target_hex):
target_bin = unhexlify(target_hex)
target_bin = target_bin[::-1] # byte-swap and dword-swap
target_bin_str = hexlify(target_bin)
target = long(target_bin_str, 16)
difficulty = float(MAX_INT)/target if target > 0 else 0. # difficulty
return (target, difficulty)
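# Illustrative sketch (editor's addition, never called by the miner): how
# decode_target_value maps a stratum target to a difficulty. The target hex
# below is made up for the example.
def _decode_target_value_example():
    # "ffff3f00" byte-reversed is "003fffff", so target = 0x003fffff = 4194303
    # and difficulty = float(MAX_INT) / target = 4294967295 / 4194303 ~= 1024.
    target, difficulty = decode_target_value("ffff3f00")
    assert target == 0x003fffff
    assert 1023.9 < difficulty < 1024.1
    return target, difficulty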
class SimpleJsonRpcClient(threading.Thread):
'''Simple JSON-RPC client.
To use this class:
1) Create a sub-class
2) Override handle_reply(self, request, reply)
3) Call connect(socket)
Use self.send(method, params) to send JSON-RPC commands to the server.
A new thread is created for listening to the connection, so calls to handle_reply
are synchronized. It is safe to call send from within handle_reply.
A minimal subclass sketch (for illustration only) follows this class definition.
'''
class ClientException(Exception): pass
class RequestReplyException(Exception):
def __init__(self, message, reply, request = None):
Exception.__init__(self, message)
self._reply = reply
self._request = request
request = property(lambda s: s._request)
reply = property(lambda s: s._reply)
class RequestReplyWarning(RequestReplyException):
'''Sub-classes can raise this to inform the user of JSON-RPC server issues.'''
pass
def __init__(self):
threading.Thread.__init__(self)
self._socket = None
self._lock = threading.RLock()
self._rpc_thread = None
self._message_id = 1
self._requests = dict()
self.exit = Event()
def _handle_incoming_rpc(self):
data = ""
while not self.exit.is_set():
try:
# Get the next line if we have one, otherwise, read and block
if '\n' in data:
(line, data) = data.split('\n', 1)
else:
chunk = self._socket.recv(1024)
if chunk: data += chunk
continue
# except IOError as e:
# if e.errno == 10053: # Error: An established connection was aborted by the software in your host machine
# pass
# time.sleep(1)
# data = ""
# continue
except Exception, e:
#print >> sys.stderr, e
time.sleep(1)
data = ""
continue
log('JSON-RPC Server > ' + line, LEVEL_PROTOCOL, self._pool_id)
# Parse the JSON
try:
reply = json.loads(line)
except Exception, e:
log("JSON-RPC Error: Failed to parse JSON %r (skipping)" % line, LEVEL_ERROR, self._pool_id)
continue
try:
request = None
with self._lock:
if 'id' in reply and reply['id'] in self._requests:
request = self._requests[reply['id']]
self.handle_reply(request = request, reply = reply)
except self.RequestReplyWarning, e:
output = e.message
if e.request:
output += '\n ' + e.request
output += '\n ' + e.reply
log(output, LEVEL_ERROR)
def handle_reply(self, request, reply):
# Override this method in sub-classes to handle a message from the server
raise self.RequestReplyWarning('Override this method')
def send(self, method, params):
'''Sends a message to the JSON-RPC server'''
if not self._socket:
#raise self.ClientException('Not connected')
return
if method == 'ping':
with self._lock:
self._socket.send('\r') # just to keep alive
log('Ping sent', LEVEL_DEBUG, self._pool_id)
return
request = dict(id = self._message_id, method = method, params = params)
if settings.OPT_REPLY_WITH_RPC2_EXPLICIT:
request['jsonrpc'] = '2.0'
message = json.dumps(request)
with self._lock:
self._requests[self._message_id] = request
self._message_id += 1
self._socket.send(message + '\n')
log('JSON-RPC Server < ' + message, LEVEL_PROTOCOL, self._pool_id)
return request
def connect(self, socket):
'''Connects to a remote JSON-RPC server'''
self._socket = socket
if not self._rpc_thread:
self._rpc_thread = threading.Thread(target = self._handle_incoming_rpc)
self._rpc_thread.daemon = True
self._rpc_thread.start()
def shutdown(self):
log("RPC shutdown initiated", LEVEL_DEBUG)
self.exit.set()
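# Minimal usage sketch of SimpleJsonRpcClient (illustration only; MinerRPC
# below is the real subclass used by the miner). The class, socket and
# parameter values here are hypothetical.
# (Note: the receive loop also expects a _pool_id attribute, which MinerRPC sets.)
class _EchoRpcClient(SimpleJsonRpcClient):
    '''Logs every reply it receives; see the base class docstring above.'''
    def handle_reply(self, request, reply):
        log('reply: %r (request: %r)' % (reply, request), LEVEL_DEBUG)
# A caller would create a connected socket, hand it to connect(), and then
# issue requests with send(), roughly:
#   client = _EchoRpcClient()
#   client.connect(connected_socket) # a socket already connected to the pool
#   client.send(method='login', params={'login': 'wallet', 'pass': 'x'})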
class MinerRPC(SimpleJsonRpcClient):
class MinerRPCWarning(SimpleJsonRpcClient.RequestReplyWarning):
def __init__(self, message, reply, request = None):
SimpleJsonRpcClient.RequestReplyWarning.__init__(self, 'Mining State Error: ' + message, reply, request)
class MinerRPCAuthenticationException(SimpleJsonRpcClient.RequestReplyException): pass
def __init__(self, pool_info, work_submit_queue, g_work, work_report):
SimpleJsonRpcClient.__init__(self)
self._pool_info = pool_info
self._pool_id = pool_info['id']
self._url = pool_info['url']
self._username = pool_info['username']
self._password = pool_info['password']
self._work_submit_queue = work_submit_queue
self._g_work = g_work
self._work_report = work_report
self._login_id = None
self._thr_list = None
self._cur_stratum_diff = 0.
if 'work_accepted' in work_report:
self._work_accepted = work_report['work_accepted']
else:
self._work_accepted = 0
if 'work_submited' in work_report:
self._work_submited = work_report['work_submited']
else:
self._work_submited = 0
self._my_sock = None
self._last_check_idle_time = time.time()
url = property(lambda s: s._url)
username = property(lambda s: s._username)
password = property(lambda s: s._password)
login_id = property(lambda s: s._login_id)
def set_thread_list(self, thr_list):
self._thr_list = thr_list
# Overridden from SimpleJsonRpcClient
def handle_reply(self, request, reply):
""" Handle login result"""
if request and request.get("method") == "login":
error = reply.get("error")
if error is not None:
self._pool_info['error'] = error.get('message')
log("Error %d: %s" % (error.get('code'), error.get('message')), LEVEL_ERROR, self._pool_id)
# relogin after 10 seconds
if self._wait(10):
self._login()
return
result = reply.get("result")
if result and result.get("status") == "OK":
job_params = result.get("job")
if job_params:
self._login_id = result.get("id")
""" handle job here """
self._set_new_job(job_params)
elif request and request.get("method") == "submit":
self._work_submited += 1
self._work_report['work_submited'] = self._work_submited
if reply.get("error") is not None:
error = reply.get("error")
log("rejected: %s, %d/%d, NO!!!" % (error.get("message"),
self._work_accepted, self._work_submited), LEVEL_ERROR, self._pool_id)
if error.get("message") in POOL_ERROR_MSGS:
#self._login()
self.try_connect()
elif reply.get("result") is not None:
res = reply.get("result")
if res.get("status") == "OK":
self._work_accepted += 1
accepted_percentage = self._work_accepted*100./self._work_submited
# hash_rates = self._pool_info['hash_report'] if 'hash_report' in self._pool_info else {}
# if len(hash_rates) > 0:
# hash_rates = dict(hash_rates)
# _total_hash_rate = reduce(lambda x, y: x+y, [hash_rates[k] for k in hash_rates])
# else:
# _total_hash_rate = 0.0
_total_hash_rate = 0.0
if 'total_hashrate' in self._pool_info:
_total_hash_rate = self._pool_info['total_hashrate']
log("accepted %d/%d (%.2f%%), %s, YES!" % (self._work_accepted, self._work_submited,
accepted_percentage, human_readable_hashrate(_total_hash_rate)), LEVEL_INFO, self._pool_id)
self._work_report['work_accepted'] = self._work_accepted
elif reply.get("error") is not None:
error = reply.get("error")
if error.get("message") in POOL_ERROR_MSGS:
#self._login()
self.try_connect()
log("Error %d: %s" % (error.get('code'), error.get('message')), LEVEL_ERROR, self._pool_id)
#self.MinerRPCWarning(error.get("message"), reply)
elif reply.get("method") == "job":
job_params = reply.get("params")
""" handle job here """
if job_params:
self._set_new_job(job_params)
def _set_new_job(self, job_params):
job_id = job_params.get("job_id")
try:
target_hex = job_params.get("target")
target, difficulty = decode_target_value(target_hex)
assert(target > 0 and difficulty > 0)
except:
log("Invalid stratum target: %s" % target_hex, LEVEL_ERROR, self._pool_id)
return
blob_hex = job_params.get("blob")
try:
blob_bin = unhexlify(blob_hex)
nonce = long( hexlify(blob_bin[39:43]), 16)
assert(len(blob_bin) == 76)
assert(nonce >= 0)
except:
log("Invalid stratum blob: %s" % blob_hex, LEVEL_ERROR, self._pool_id)
return
self._g_work['login_id'] = self._login_id
self._g_work['target'] = target
self._g_work['blob_bin'] = blob_bin
self._g_work['nonce'] = nonce
self._g_work['num_thrs'] = len(self._thr_list)
self._g_work['job_id'] = job_id
self._g_work['is_cryptolite'] = self._pool_info['algo'] == "Cryptonight-Light"
log('New job recv: target="%s" blob="%s"' % (target_hex, blob_hex), LEVEL_INFO, self._pool_id)
if difficulty != self._cur_stratum_diff:
self._cur_stratum_diff = difficulty
self._work_report['difficulty'] = difficulty
log("Stratum difficulty set to %.f" % difficulty, LEVEL_INFO, self._pool_id)
def run(self):
self.try_connect()
start = time.time()
while not self.exit.is_set():
if not self._work_submit_queue.empty():
work_submit = self._work_submit_queue.get()
try:
self.send(method=work_submit['method'], params=work_submit['params'])
start = time.time() + settings.OPT_PING_INTERVAL # to delay sending 'ping' by interval setting
except socket.error:
self.try_connect()
continue
elif settings.OPT_SEND_PING:
""" 'ping' stratum server periodically to detect disconnection """
elapsed = time.time() - start
if elapsed >= settings.OPT_PING_INTERVAL:
try:
self.send(method='ping', params=None)
except socket.error:
self.try_connect()
continue
finally:
start = time.time()
""" relogin after 1 minute idle, i.e. receiving no new jobs for a long time,
may be due to some pool's error other than network error """
if time.time() - self._last_check_idle_time >= 60:
if 'error' in self._pool_info and self._pool_info['error'] == NETWORK_ERROR_MSG:
self._last_check_idle_time = time.time()
continue
hash_rates = self._pool_info['hash_report'] if 'hash_report' in self._pool_info else None
if hash_rates is not None and len(hash_rates) > 0: # it means mining is already on
total_hash_rate = reduce(lambda x, y: x+y, [hash_rates[k] for k in dict(hash_rates)])
# but mining is now idle
if total_hash_rate == 0.:
self._login()
self._last_check_idle_time = time.time()
time.sleep(.1)
""" try to close socket before exit """
try:
self._my_sock.close()
except:
pass
def try_connect(self):
url = urlparse.urlparse(self.url)
hostname = url.hostname
try:
port = int(url.port)
except:
self._pool_info['error'] = "Invalid pool port"
log("Invalid pool port!", LEVEL_ERROR)
return
if not hostname:
self._pool_info['error'] = "Invalid pool URL"
log("Invalid pool URL", LEVEL_ERROR)
return
while not self.exit.is_set():
if not self._my_sock:
log('Connecting to RPC server [%s:%d]...' % (hostname, port), LEVEL_INFO, self._pool_id)
self._my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock_keep_alive()
else:
log(NETWORK_ERROR_MSG, LEVEL_ERROR, self._pool_id)
self._pool_info['error'] = NETWORK_ERROR_MSG
# (try to) stop all mining jobs by setting global job_id as None
self._g_work['job_id'] = None
# and clear submit works remain in queue if any
while not self._work_submit_queue.empty():
_ = self._work_submit_queue.get()
try:
self._my_sock.close()
except:
pass
else:
self._my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock_keep_alive()
try:
self._my_sock.connect((hostname, port))
self.connect(self._my_sock)
except socket.error:
# wait 10 seconds
self._wait(10)
else:
self._login()
if 'error' in self._pool_info:
self._pool_info['error'] = None
break
def _sock_keep_alive(self):
after_idle_sec = 1
interval_sec= 3
my_os = platform.system()
try:
if my_os == "Windows":
self._my_sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, after_idle_sec*1000, interval_sec*1000))
elif my_os == "Linux":
self._my_sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self._my_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
self._my_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
elif my_os == "Darwin":
TCP_KEEPALIVE = 0x10
self._my_sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self._my_sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)
except:
pass
def _login(self):
# (re)login
log('Logging in ...', LEVEL_INFO, self._pool_id)
params = dict(login=self._username, agent="%s/%d.%s"
%(settings.USER_AGENT, settings.VERSION[0], settings.VERSION[1]))
params['pass'] = self._password
try:
self.send(method='login', params = params)
except socket.error:
self.try_connect()
else:
# mark 'check idle time' to avoid multiple logins
self._last_check_idle_time = time.time()
def _wait(self, seconds=1):
""" wait without blocking UI
"""
for _ in range(seconds*10):
if self.exit.is_set(): return False
time.sleep(.1)
return True
class MinerWork(Process):
def __init__(self, thr_id, work_submit_queue, g_work, hash_report, cpu_priority_level):
Process.__init__(self)
self._cur_job_id = None
self._hash_rate = 0.0
self._thr_id = thr_id
self._work_submit_queue = work_submit_queue
self._g_work = g_work
self._hash_report_queue = hash_report
self.exit = Event()
_p = psutil.Process(self.pid)
_cpu_affinity = [CPU_COUNT - (thr_id % CPU_COUNT) - 1]
if sys.platform == "win32":
_p.cpu_affinity(_cpu_affinity)
#_p.nice(cpu_priority_level)
def run(self):
_total_hashes = 0
blob_bin = None
nonce = 1
max_nonce = target = login_id = 0
#end_nonce = MAX_INT - 0x20
end_nonce = 0
is_cryptolite = 0 # (if) is cryptonight-lite algo
# max_int32 = 2**32 # =4294967296
while not self.exit.is_set():
if not 'job_id' in self._g_work or self._g_work['job_id'] is None:
self._hash_rate = 0.
self._shareHashRate()
time.sleep(.1)
continue
if self._g_work['job_id'] != self._cur_job_id:
self._cur_job_id = self._g_work['job_id']
nonce = self._g_work['nonce']
blob_bin = self._g_work['blob_bin']
target = self._g_work['target']
login_id = self._g_work['login_id']
is_cryptolite = self._g_work['is_cryptolite']
end_nonce = MAX_INT /self._g_work['num_thrs']*(self._thr_id + 1) - 0x20
nonce += MAX_INT/self._g_work['num_thrs']*self._thr_id
""" randomize nonce start"""
if settings.OPT_RANDOMIZE:
offset = int(settings.OPT_SCANTIME*self._hash_rate) if self._hash_rate > 0 else 64*settings.OPT_SCANTIME
nonce += random.randint(0, MAX_INT/self._g_work['num_thrs'] - offset)
if nonce > MAX_INT - 0x20:
nonce = end_nonce
max64 = int(settings.OPT_SCANTIME*self._hash_rate) if self._hash_rate > 0 else 64
if nonce + max64 > end_nonce:
max_nonce = end_nonce
else:
max_nonce = nonce + max64
if max_nonce > MAX_INT:
max_nonce = MAX_INT
""" start _hash scan """
total_hashes_done = 0
_hashes_done = 0
start = _start = time.time()
while nonce <= max_nonce and not self.exit.is_set():
nonce_bin = struct.pack("<I", nonce)
blob_bin = blob_bin[:39] + nonce_bin + blob_bin[43:]
if is_cryptolite:
_hash = cryptolite_hash(blob_bin, HAS_AES_NI)
else:
_hash = cryptonite_hash(blob_bin, HAS_AES_NI)
nonce += 1
_hashes_done += 1
total_hashes_done += 1
""" calculate _hash rate"""
if _hashes_done >= self._hash_rate/2:
# if _hashes_done >= 10:
elapsed = time.time() - _start
if elapsed > 0:
self._hash_rate = _hashes_done/elapsed
""" share _hash rate """
self._shareHashRate()
log('CPU #%d: %.2f H/s' % (self._thr_id, self._hash_rate), LEVEL_DEBUG)
_start = time.time()
_hashes_done = 0
if struct.unpack("<I", _hash[28:])[0] < target:
""" Yes, hash found! """
params = dict(id=login_id, job_id = self._cur_job_id,
nonce=hexlify(nonce_bin), result=hexlify(_hash))
self._work_submit_queue.put({'method': 'submit', 'params': params})
break
""" if there is a new work, break scan """
if self._g_work['job_id'] != self._cur_job_id:
break
elapsed = time.time() - start
self._hash_rate = total_hashes_done/elapsed if elapsed > 0 else 0.
""" share _hash rate """
self._shareHashRate()
log('CPU #%d: %.2f H/s' % (self._thr_id, self._hash_rate), LEVEL_DEBUG)
""" if idle: """
if total_hashes_done == 0:
time.sleep(.1)
## Set hash_rate to 0.0 before exit
self._hash_rate = 0.
self._shareHashRate()
def _shareHashRate(self):
self._hash_report_queue.update({'%d' % self._thr_id: self._hash_rate})
def shutdown(self):
log("Miner thread# %d shutdown initiated" % self._thr_id, LEVEL_DEBUG)
self.exit.set()
def set_cpu_priority(self, cpu_priority_level):
_p = psutil.Process(self.pid)
_p.nice(cpu_priority_level)
def show_priority(self):
_p = psutil.Process(self.pid)
print "PID", _p.pid, "Priority", _p.nice()
|
subproc.py
|
from multiprocessing import Process
from time import sleep
def count_sheeps(number):
"""Count all them sheeps."""
fd=open('C:/tmp/subproc3.log','w')
for sheep in range(number):
fd.write("%s " % sheep)
#sleep(1)
fd.close()
if __name__ == "__main__":
count_sheeps(20)
p = Process(target=count_sheeps, args=(5,))
p.daemon = True
p.start()
print("Let's just forget about it and quit here and now.")
exit()
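# Note (editor): because p.daemon is True and the parent exits immediately,
# the child process is typically terminated before it finishes (or even
# starts) writing its log file. To let it run to completion, one would drop
# the daemon flag or join it first, e.g.:
#   p = Process(target=count_sheeps, args=(5,))
#   p.start()
#   p.join()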
|
inference_sequence.py
|
import os
import sys
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
import warnings
import _thread
import threading
from queue import Queue, Empty
warnings.filterwarnings("ignore")
from pprint import pprint, pformat
import time
import psutil
import multiprocessing as mp
import inference_common
ThreadsFlag = True
IOProcesses = []
cv2.setNumThreads(1)
# Exception handler
def exception_handler(exctype, value, tb):
import traceback
locks = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locks')
cmd = 'rm -f ' + locks + '/*'
os.system(cmd)
pprint ('%s in %s' % (value, exctype))
pprint(traceback.format_exception(exctype, value, tb))
sys.__excepthook__(exctype, value, tb)
input("Press Enter to continue...")
sys.excepthook = exception_handler
# Ctrl + C handler
import signal
def signal_handler(sig, frame):
global ThreadsFlag
ThreadsFlag = False
time.sleep(0.1)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def clear_write_buffer(write_buffer, tot_frame, frames_written):
global ThreadsFlag
global IOProcesses
def write_in_current_thread(path, item, cnt, frames_written):
try:
cv2.imwrite(path, item[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
frames_written[cnt] = path
except Exception as e:
print ('Error writing %s: %s' % (path, e))
def write_in_new_thread(path, item, cnt, frames_written):
cv2.imwrite(path, item[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
frames_written[cnt] = path
number_of_write_threads = 8
new_frames_number = ((tot_frame - 1) * ((2 ** args.exp) -1)) + tot_frame
cnt = 0
while ThreadsFlag:
alive_processes = []
for process in IOProcesses:
if process.is_alive():
alive_processes.append(process)
else:
process.join(timeout=0)
IOProcesses = list(alive_processes)
item = write_buffer.get()
# if cnt == 0:
# print ('rendering %s frames to %s' % (new_frames_number, args.output))
# pbar = tqdm(total=new_frames_number, unit='frame')
if item is None:
# pbar.close() # type: ignore
break
if cnt < new_frames_number:
path = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(cnt))
if len(IOProcesses) < number_of_write_threads:
try:
p = mp.Process(target=write_in_new_thread, args=(path, item, cnt, frames_written, ))
p.start()
IOProcesses.append(p)
except:
write_in_current_thread(path, item, cnt, frames_written)
else:
write_in_current_thread(path, item, cnt, frames_written)
# pbar.update(1) # type: ignore
cnt += 1
def build_read_buffer(user_args, read_buffer, videogen):
global ThreadsFlag
for frame in videogen:
if not ThreadsFlag:
break
frame_data = cv2.imread(os.path.join(user_args.input, frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
read_buffer.put(frame_data)
read_buffer.put(None)
def make_inference(model, I0, I1, exp, UHD):
middle = model.inference(I0, I1, UHD)
if exp == 1:
return [middle]
first_half = make_inference(model, I0, middle, exp=exp - 1, UHD=UHD)
second_half = make_inference(model, middle, I1, exp=exp - 1, UHD=UHD)
return [*first_half, middle, *second_half]
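# Rough illustration of the recursion above: each extra level of exp doubles
# the number of segments, so make_inference returns 2 ** exp - 1 interpolated
# frames between I0 and I1 (exp=1 -> 1, exp=2 -> 3, exp=3 -> 7). This matches
# the (2 ** args.exp) - 1 spacing used elsewhere in this script.
def _expected_intermediate_frames(exp):
    """Editor's helper, used only for illustration; not called by the pipeline."""
    return 2 ** exp - 1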
def find_middle_frame(frames, frames_taken):
for start_frame in range(1, len(frames.keys()) + 1):
for frame_number in range (start_frame, len(frames.keys()) + 1):
if frames.get(frame_number) and (not frames.get(frame_number + 1, True)):
start_frame = frame_number
break
end_frame = start_frame + 1
for frame_number in range(start_frame + 1, len(frames.keys()) + 1):
if frames.get(frame_number):
end_frame = frame_number
break
end_frame = frame_number
middle_frame = start_frame + int((end_frame - start_frame) / 2)
if frames.get(start_frame) and not frames.get(middle_frame):
if middle_frame in frames_taken.keys():
# this frame is taken by another worker
continue
else:
# mark frame as taken
frames_taken[middle_frame] = 'taken between %s and %s' % (start_frame, end_frame)
#print ('s: %s m: %s e: %s' % (start_frame, middle_frame, end_frame))
#print ('%s: %s' % ( start_frame, frames.get(start_frame) ))
#print ('%s: %s' % ( middle_frame, frames.get(middle_frame) ))
#print ('%s: %s' % ( end_frame, frames.get(end_frame) ))
return (start_frame, middle_frame, end_frame)
return False
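# Worked example of the search above (illustration only): with five target
# frame slots where only frames 1 and 5 exist, the first call returns
# (1, 3, 5); once frame 3 has been rendered, later calls yield (1, 2, 3) and
# (3, 4, 5), so the gaps are filled by repeated bisection until every slot
# has a file (or has been claimed in frames_taken by another worker).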
def three_of_a_perfect_pair(frames, device, padding, model, args, h, w, frames_written, frames_taken):
perfect_pair = find_middle_frame(frames, frames_taken)
if not perfect_pair:
# print ('no more frames left')
return False
start_frame = perfect_pair[0]
middle_frame = perfect_pair[1]
end_frame = perfect_pair[2]
frame0 = cv2.imread(frames[start_frame], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
frame1 = cv2.imread(frames[end_frame], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I0 = torch.from_numpy(np.transpose(frame0, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I1 = torch.from_numpy(np.transpose(frame1, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
mid = model.inference(I0, I1, args.UHD)
mid = (((mid[0]).cpu().detach().numpy().transpose(1, 2, 0)))
midframe = mid[:h, :w]
cv2.imwrite(os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(middle_frame)), midframe[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
start_frame_out_file_name = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(start_frame))
if not os.path.isfile(start_frame_out_file_name):
cv2.imwrite(start_frame_out_file_name, frame0[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
frames_written[ start_frame ] = start_frame_out_file_name
end_frame_out_file_name = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(end_frame))
if not os.path.isfile(end_frame_out_file_name):
cv2.imwrite(end_frame_out_file_name, frame1[:, :, ::-1], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
frames_written[ end_frame ] = end_frame_out_file_name
frames[ middle_frame ] = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(middle_frame))
frames_written[ middle_frame ] = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(middle_frame))
return True
def progress_bar_updater(frames_written, last_frame_number):
global ThreadsFlag
pbar = tqdm(total=last_frame_number, unit='frame')
lastframe = 0
while ThreadsFlag:
try:
pbar.n = len(frames_written.keys())
pbar.last_print_n = len(frames_written.keys())
if lastframe != len(frames_written.keys()):
pbar.refresh()
lastframe = len(frames_written.keys())
except:
pass
time.sleep(0.01)
pbar.close()
if __name__ == '__main__':
start = time.time()
print('initializing Timewarp ML...')
parser = argparse.ArgumentParser(description='Interpolation for a sequence of exr images')
parser.add_argument('--input', dest='input', type=str, default=None)
parser.add_argument('--output', dest='output', type=str, default=None)
parser.add_argument('--model', dest='model', type=str, default='./trained_models/default/v2.0.model')
parser.add_argument('--UHD', dest='UHD', action='store_true', help='flow size 1/4')
parser.add_argument('--exp', dest='exp', type=int, default=1)
parser.add_argument('--cpu', dest='cpu', action='store_true', help='process only on CPU(s)')
args = parser.parse_args()
assert args.input is not None and args.output is not None
manager = mp.Manager()
frames = manager.dict()
frames_written = manager.dict()
frames_taken = manager.dict()
img_formats = ['.exr',]
files_list = []
for f in os.listdir(args.input):
name, ext = os.path.splitext(f)
if ext in img_formats:
files_list.append(f)
input_duration = len(files_list)
if input_duration < 2:
print('not enough frames to perform slow motion: %s given' % input_duration)
input("Press Enter to continue...")
sys.exit()
input_files = {}
input_frame_number = 1
for file in sorted(files_list):
input_file_path = os.path.join(args.input, file)
if os.path.isfile(input_file_path):
input_files[input_frame_number] = input_file_path
input_frame_number += 1
first_frame_number = 1
step = (2 ** args.exp) -1
last_frame_number = (input_duration - 1) * step + input_duration
frame_number = first_frame_number
for file_name in sorted(files_list):
frames[frame_number] = os.path.join(args.input, file_name)
frame_number += step + 1
for frame_number in range(first_frame_number, last_frame_number):
frames[frame_number] = frames.get(frame_number, '')
first_image = cv2.imread(frames.get(first_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
h, w, _ = first_image.shape
ph = ((h - 1) // 64 + 1) * 64
pw = ((w - 1) // 64 + 1) * 64
padding = (0, pw - w, 0, ph - h)
output_folder = os.path.abspath(args.output)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if torch.cuda.is_available() and not args.cpu:
# process on GPU
files_list.sort()
files_list.append(files_list[-1])
write_buffer = Queue(maxsize=inference_common.OUTPUT_QUEUE_SIZE)
read_buffer = Queue(maxsize=inference_common.INPUT_QUEUE_SIZE)
_thread.start_new_thread(build_read_buffer, (args, read_buffer, files_list))
_thread.start_new_thread(clear_write_buffer, (write_buffer, input_duration, frames_written))
if 'v1.8.model' in args.model:
from model.RIFE_HD import Model # type: ignore
else:
from model.RIFE_HDv2 import Model # type: ignore
model = Model()
model.load_model(args.model, -1)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
lastframe = first_image
I1 = torch.from_numpy(np.transpose(lastframe, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I1 = F.pad(I1, padding)
frame = read_buffer.get()
print ('rendering %s frames to %s/' % (last_frame_number, args.output))
progress_bar_updater = threading.Thread(target=progress_bar_updater, args=(frames_written, last_frame_number, ))
progress_bar_updater.daemon = True
progress_bar_updater.start()
cnt = 0
for nn in range(1, input_duration+1):
frame = read_buffer.get()
if frame is None:
break
I0 = I1
I1 = torch.from_numpy(np.transpose(frame, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I1 = F.pad(I1, padding)
try:
output = make_inference(model, I0, I1, args.exp, args.UHD)
except Exception as e:
ThreadsFlag = False
time.sleep(0.1)
progress_bar_updater.join()
print ('\n%s' % e)
for p in IOProcesses:
p.join(timeout=8)
for p in IOProcesses:
p.terminate()
p.join(timeout=0)
sys.exit()
write_buffer.put(lastframe)
cnt += 1
for mid in output:
if sys.platform == 'darwin':
mid = (((mid[0]).cpu().detach().numpy().transpose(1, 2, 0)))
else:
mid = (((mid[0]).cpu().numpy().transpose(1, 2, 0)))
write_buffer.put(mid[:h, :w])
cnt += 1
lastframe = frame
write_buffer.put(lastframe)
while(not write_buffer.empty()):
time.sleep(0.1)
else:
# process on CPU(s)
if 'v1.8.model' in args.model:
from model_cpu.RIFE_HD import Model # type: ignore
else:
from model_cpu.RIFE_HDv2 import Model # type: ignore
model = Model()
model.load_model(args.model, -1)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
device = torch.device('cpu')
torch.set_grad_enabled(False)
sim_workers, thread_ram = inference_common.safe_threads_number(h, w)
print ('rendering %s frames to %s/' % (last_frame_number, args.output))
active_workers = []
progress_bar_updater = threading.Thread(target=progress_bar_updater, args=(frames_written, last_frame_number, ))
progress_bar_updater.daemon = True
progress_bar_updater.start()
last_thread_time = time.time()
while len(frames_written.keys()) != last_frame_number:
p = mp.Process(target=three_of_a_perfect_pair, args=(frames, device, padding, model, args, h, w, frames_written, frames_taken, ))
p.start()
active_workers.append(p)
# try to shift threads in time to avoid memory congestion
if (time.time() - last_thread_time) < (thread_ram / 8):
if sim_workers > 1:
time.sleep(thread_ram/8)
while len(active_workers) >= sim_workers:
finished_workers = []
alive_workers = []
for worker in active_workers:
if not worker.is_alive():
finished_workers.append(worker)
else:
alive_workers.append(worker)
active_workers = list(alive_workers)
time.sleep(0.01)
last_thread_time = time.time()
while len(active_workers):
finished_workers = []
alive_workers = []
for worker in active_workers:
if not worker.is_alive():
finished_workers.append(worker)
else:
alive_workers.append(worker)
active_workers = list(alive_workers)
time.sleep(0.01)
ThreadsFlag = False
progress_bar_updater.join()
for p in IOProcesses:
p.join(timeout=1)
for p in IOProcesses:
p.terminate()
p.join(timeout=0)
import hashlib
lockfile = os.path.join('locks', hashlib.sha1(output_folder.encode()).hexdigest().upper() + '.lock')
if os.path.isfile(lockfile):
os.remove(lockfile)
# input("Press Enter to continue...")
sys.exit()
|
__init__.py
|
import asyncio
import json
import re
import threading
import nonebot
from nonebot import on_command, NLPSession, on_startup, on_natural_language
from nonebot.command import Command, CommandSession
from .WifeClass import *
import difflib
from bot_config import GROUP_USE
def get_equal_rate(str1, str2):
return difflib.SequenceMatcher(None, str1, str2).quick_ratio()
async def WriteToWifesIndex():
with open('index.json', 'w+', encoding='utf-8') as f:
src = '['
for key in WifeDict:
Wifejson = json.dumps(WifeDict[key].getDict(), ensure_ascii=False)
src += Wifejson + ',\n'
src += ']'
src = src.replace(',\n]', ']')
f.write(src)
f.close()
def Read():
try:
with open('index.json', 'r+', encoding='utf-8') as f:
line = f.readline()
while line:
line = str(line).replace(
'[', '').replace(',\n', '').replace(']', '')
t = json.loads(line)
user_id = husband(t['husband'])
temp_wife = WifeObj(user_id)
temp_wife.height = t['height']
temp_wife.weight = t['weight']
temp_wife.name = t['name']
temp_wife.ouBai = t['ouBai']
temp_wife.liking = t['liking']
temp_wife.Character = t['character']
temp_wife.age = t['age']
temp_wife.isMerry = t['isMerry']
temp_wife.work = t['work']
temp_wife.race = t['race']
temp_wife.bud = t['bud']
try:
temp_wife.Hair = t['hair']
temp_wife.eyesColor = t['eyesColor']
temp_wife.WifeNickName = t['WifeNickName'] if not t['WifeNickName'] in BanTalkMember else '老婆'
temp_wife.HusbandNickName = t['HusbandNickName'] if not t[
'HusbandNickName'] in BanTalkMember else '老公'
except:
pass
temp_wife.addInWifeDict()
line = f.readline()
f.close()
except:
with open('index.json', 'a+', encoding='utf-8') as f:
f.close()
def ReadLove():
try:
f = open('love.txt', 'r', encoding='utf-8')
a = f.readline()
while a:
if not a in LoveTalkList:
LoveTalkList.append(a)
a = f.readline()
except:
f = open('love.txt', 'w+')
f.close()
try:
f = open('yandere.txt', 'r', encoding='utf-8')
a = f.readline().encode('utf-8').decode('utf-8')
while a:
if not a in YanDereList:
YanDereList.append(a)
a = f.readline()
except:
f = open('yandere.txt', 'w+', encoding='utf-8')
f.close()
try:
f = open('marrytalk.txt', 'r', encoding='utf-8')
a = f.readline().encode('utf-8').decode('utf-8')
while a:
if not a in MarryTalkList:
MarryTalkList.append(a)
a = f.readline()
except:
f = open('marrytalk.txt', 'w+', encoding='utf-8')
f.close()
@on_startup(func=Read)
@on_startup(func=ReadLove)
@on_command('getLove', only_to_me=True, aliases=('求分配女朋友'))
async def getGirlFirend(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
return
try:
UserWife: WifeObj = await searchWife(session, True)
await session.send(f'你已经有{UserWife.name}了', at_sender=True)
return
except:
user = session.event['user_id']
t = WifeObj(husband(user))
await session.send(f'我是{t.name},我愿意与你在一起,今后请多关照')
t.addInWifeDict()
await WriteToWifesIndex()
return
bot = nonebot.get_bot()
async def searchWife(session: CommandSession, isFirst: bool):
id = session.event['user_id']
for key in WifeDict:
if WifeDict[key].husband.ID == id:
return WifeDict[key]
else:
if not isFirst:
await session.send(at_sender=True, message='你还没有女朋友呢,发送“求分配女朋友”来让上帝赐你一位吧,记得要好好珍惜')
return -1
@on_command('couple', only_to_me=False, aliases='分手')
async def couple(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
return
try:
UserWife: WifeObj = await searchWife(session, False)
src = UserWife.couples()
await session.send(at_sender=True, message=src)
threading.Thread(target=save).start()
except:
pass
@on_command('getMarry', only_to_me=False, aliases='我们结婚吧')
async def getMarry(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
return
try:
UserWife: WifeObj = await searchWife(session, False)
src = UserWife.getMarry() # run the marriage routine
await session.send(at_sender=True, message=src)
threading.Thread(target=save).start()
except:
pass
@on_command('help_girl', only_to_me=False, aliases=('!help', '!help'))
async def help_girl(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
return
await session.send(message='1.查询女朋友的信息\n'
'2.求分配女朋友\n'
'3.分手\n'
'4.呼叫女朋友的名or近似名\n'
'5.呼叫女朋友的昵称\n'
'6.老婆以后叫我+空格+昵称(请合法使用昵称)\n'
'7.老婆以后我叫你+空格+昵称(请合法使用昵称)\n')
@on_command('WifeCallToHusband', only_to_me=False, aliases='老婆以后叫我')
async def WifeCallToHusband(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
return
masg = session.current_arg_text.replace('老婆以后叫我', '')
try:
UserWife: WifeObj = await searchWife(session, False)
await session.send(UserWife.setNickName(masg, isWife=False))
except:
return
@on_command('HusbandCallToWife', only_to_me=False, aliases='老婆以后我叫你')
async def HusbandCallToWife(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
return
masg = session.current_arg_text.replace('老婆以后我叫你', '')
try:
UserWife: WifeObj = await searchWife(session, False)
await session.send(UserWife.setNickName(masg, isWife=True))
except:
return
@on_command('BanNickName', only_to_me=True, aliases='添加违禁词')
async def BanNickName(session: CommandSession):
if session.event['user_id'] == GOD:
masg = session.current_arg_text
try:
BanTalkMember.append(masg)
await session.send(f'添加{masg}成功')
except:
return
@on_command('getWifeIndex', only_to_me=False, aliases='查询女朋友的信息')
async def getWifeIndex(session: CommandSession):
group_id = str(session.event.group_id)
if group_id not in GROUP_USE:
return
try:
userWife: WifeObj = await searchWife(session, False)
except:
return
await bot.send_private_msg(user_id=userWife.husband.ID, message=userWife.WifeIndex())
await session.send(at_sender=True, message='已经将我的信息私给你了,要~保~密~哦')
await WriteToWifesIndex()
@on_command('show_girl', only_to_me=False, aliases='设置老婆形象')
async def show_girl(session: CommandSession):
group_id = str(session.event.group_id)
user_id = str(session.event.user_id)
if group_id not in GROUP_USE:
return
girl_show = str(session.get('girl_pic', prompt='请发送图片'))
try:
if "image" not in girl_show:
session.finish("你发送的不是图片!")
data = json.load(
open('bot_plugins/Get_girl/girl_pic.json', 'r', encoding='utf8'))
if user_id not in data:
new_obj = {
user_id: girl_show
}
data.update(new_obj)
else:
data[user_id] = girl_show
await session.send("老渣男了又换老婆")
with open('bot_plugins/Get_girl/girl_pic.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(data, ensure_ascii=False))
f.close()
session.finish("成功")
except:
session.finish("[错误]内存溢出")
@on_command('show_girl_pic', only_to_me=False, aliases='老婆在吗')
async def _(session: CommandSession):
group_id = str(session.event.group_id)
user_id = str(session.event.user_id)
if group_id not in GROUP_USE:
return
data = json.load(
open('bot_plugins/Get_girl/girl_pic.json', 'r', encoding='utf8'))
if user_id not in data:
session.finish()
talk_lr = ['在呢亲爱的~', '有什么事吗~', '嗯嗯~']
talk = random.choice(talk_lr)
msg = f"{data[user_id]}{talk}"
await session.send(at_sender=True, message=msg)
@on_natural_language(only_to_me=False)
async def CallWife(session: NLPSession):
t1 = threading.Thread(target=sayLoving, args=(session,))
t1.start()
def save():
loop = asyncio.new_event_loop()
a = WriteToWifesIndex()
loop.run_until_complete(a)
def sayLoving(session: NLPSession):
msg = session.msg_text
if re.search('CQ:', msg) is not None or not 1 < len(msg) <= 10:
return
sendUser = session.event['user_id']
try:
if WifeDict[msg].husband.ID == sendUser:
sendLove(WifeDict[msg], session)
return
except:
pass
Max: float = 0.4 # preset similarity threshold
for key in WifeDict:
t = get_equal_rate(key, msg)
if (Max < t or msg == WifeDict[key].WifeNickName) and sendUser == WifeDict[key].getHusbandId():
sendLove(WifeDict[key], session)
threading.Thread(target=save).start()
return
def sendLove(A: WifeObj, session: NLPSession):
loop = asyncio.new_event_loop()
a = session.send(at_sender=True, message=f'{A.name}:{A.getLoveScence()}')
A.liking += 3 if A.liking < 1314520 else 0
loop.run_until_complete(a)
|
temp2.py
|
import threading
import queue
import time
import logging
logger = logging.getLogger(__name__)
class Send():
"""base class for a sender"""
def __init__(self, name, sQueue):
self.name = name
self.sQueue = sQueue
self.thread = threading.Thread(target=self.run, name=name)
self.thread.start()
def run(self):
""" no runner so far """
pass
class Receive():
"""base class for a receiver"""
def __init__(self, name, rQueue):
self.name = name
self.rQueue = rQueue
self.thread = threading.Thread(target=self.run, name=name)
self.thread.start()
def run(self):
""" no runner so far """
while True:
try:
s = self.rQueue.get(block=True, timeout=0.1)
except queue.Empty:
continue
self.processMessage(s)
def processMessage(self, s):
pass
class TestSend(Send):
def __init__(self, name, sQueue):
Send.__init__(self, name, sQueue)
def run(self):
while True:
"""simulate some event"""
time.sleep(1)
logger.info(
"{name:s}: push event 'sendEvent'".format(
name=self.name))
self.sQueue.put('event')
class PushbuttonSimulateSend(Send):
def __init__(self, name, sQueue):
Send.__init__(self, name, sQueue)
def run(self):
while True:
"""simulate some event"""
time.sleep(30)
logger.info(
"{name:s}: push event 'emergency'".format(
name=self.name))
self.sQueue.put('emergency')
time.sleep(30)
logger.info("{name:s}: push event 'normal'".format(name=self.name))
self.sQueue.put('normal')
class MotorReceive(Receive):
def __init__(self, name, rQueue):
Receive.__init__(self, name, rQueue)
def processMessage(self, s):
if 'on' == s:
logger.info("{name:s}: Motor on".format(name=self.name))
elif 'off' == s:
logger.info("{name:s}: Motor off".format(name=self.name))
else:
logger.error(
"{name:s}: Unknown message '{msg:s}'".format(
name=self.name, msg=s))
class Controller():
def __init__(self, name, inQueue, motor_front_left):
self.name = name
self.inQueue = inQueue
self.motor_front_left = motor_front_left
# controller has state
self.state = 'stateStart'
self.thread = threading.Thread(target=self.run, name=name)
self.thread.start()
def run(self):
countFailedStartAttempts = 0
while True:
try:
s = self.inQueue.get(block=True, timeout=0.1)
except queue.Empty:
continue
if self.state == 'stateStart':
if s == 'emergency':
self.motor_front_left.put("off")
self.state = 'stateEmergency'
elif s == 'event':
self.motor_front_left.put("on")
self.state = 'stateStarted'
elif self.state == 'stateStarted':
if s == 'emergency':
self.motor_front_left.put("off")
self.state = 'stateEmergency'
elif s == 'event':
countFailedStartAttempts += 1
if countFailedStartAttempts > 10:
countFailedStartAttempts = 0
logger.error(
"{name:s}: Motor already started, event ignored".format(
name=self.name))
elif self.state == 'stateEmergency':
if s == 'emergency':
self.motor_front_left.put("off")
elif s == 'normal':
self.state = 'stateStart'
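# Summary of the Controller state machine above (derived from the code):
#   stateStart     --event-->     motor on,  go to stateStarted
#   stateStart     --emergency--> motor off, go to stateEmergency
#   stateStarted   --emergency--> motor off, go to stateEmergency
#   stateStarted   --event-->     ignored (error logged after repeated events)
#   stateEmergency --emergency--> motor off (remain in stateEmergency)
#   stateEmergency --normal-->    go to stateStart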
logging.basicConfig(level=logging.DEBUG)
logger.info("Start")
sQueue = queue.Queue()
testSend = TestSend("simulate_send", sQueue)
emergency = PushbuttonSimulateSend("emergencyButton", sQueue)
rQueue = queue.Queue()
motor = MotorReceive("front_left", rQueue)
logger.info("Some test ---")
rQueue.put("off")
rQueue.put("unqrqrqrq")
controller = Controller('controller', sQueue, rQueue)
while True:
time.sleep(0.1)
|
gui.py
|
from vispy import app, io, scene, visuals
from vispy.visuals.transforms import MatrixTransform, STTransform
import datetime
import time
import threading
import pickle
import zmq
import numpy as np
# canvas = scene.SceneCanvas(keys='interactive', bgcolor='white',
# size=(800, 600), show=True)
# view = canvas.central_widget.add_view()
# view.camera = 'arcball'
# Plot3D = scene.visuals.create_visual_node(visuals.LinePlotVisual)
# def draw_sphere(msg):
# pos = msg['translation']
# radius = msg['radius']
# color = msg['color']
# sphere = scene.visuals.Sphere(radius=radius, color=color)
# sphere.transform = STTransform(translate=pos)
# view.add(sphere)
# objects.append(sphere)
# # def draw_line(msg):
# # pos = np.array([pos, [0, 0, 0]])
# # arrow = scene.visuals.Arrow()
# # arrow.set_data(pos=pos, color=color, arrows=pos)
# # view.add(arrow)
# # objects.append(arrow)
# objects = []
# def draw_scatter(points):
# scatter = scene.visuals.Markers()
# scatter.set_data(points, edge_color=None, face_color=(1, 1, 1, .5), size=5)
# view.add(scatter)
# objects.append(scatter)
# def draw_path(msg):
# pos = msg['path']
# print(pos.shape)
# color = msg['color']
# path = Plot3D(pos, parent=view.scene)
# # path = Plot3D(pos, width=2.0, color=color,
# # edge_color='w', symbol='o', face_color=(0.2, 0.2, 1, 0.8),
# # parent=view.scene)
# objects.append(path)
# def clear_view():
# global objects
# for obj in objects:
# obj.parent = None
# objects = []
# canvas.connect(on_key_press)
class Gui(scene.SceneCanvas):
def __init__(self, **kwargs):
scene.SceneCanvas.__init__(
self, title='pyversor', keys='interactive', bgcolor='white', size=(800, 600), show=True)
self.unfreeze()
self.view = self.central_widget.add_view()
self.view.camera = 'turntable'
self._worker_thread = threading.Thread(target=self.worker)
self._worker_thread.daemon = True
self._worker_thread.start()
self._base_frame = scene.visuals.XYZAxis()
self._objects = []
self.freeze()
def clear_view(self):
for obj in self._objects:
obj.parent = None
self._objects = []
def on_key_press(self, event):
if event.key.name == 'S':
filename = '{}-pyversor-screenshot.png'.format(
datetime.datetime.now().isoformat())
screenshot = self.render()
io.write_png(filename, screenshot)
print('Saved screenshot with filename: {}'.format(filename))
elif event.key.name == 'C':
self.clear_view()
elif event.key.name == 'F':
# Toggle base frame
if self._base_frame.parent is None:
self._base_frame.parent = self.view.scene
else:
self._base_frame.parent = None
def worker(self):
port = "5556"
context = zmq.Context()
socket = context.socket(zmq.PAIR)
# socket.bind("tcp://127.0.0.1:%s" % port)
socket.bind("tcp://*:%s" % port)
while True:
print("waiting")
data = socket.recv()
msg = pickle.loads(data)
type_ = msg['type']
if msg['clear']:
self.clear_view()
if type_ == 'sphere':
draw_sphere(msg)
elif type_ == 'path':
print('got path')
draw_path(msg)
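# Sketch of a client for the worker loop above (editor's illustration, not
# used by this module). The message layout -- 'type', 'clear' and the sphere
# fields -- is inferred from the handlers and the commented-out draw_sphere
# helper, so treat the exact keys as assumptions.
def _example_send_sphere(host='127.0.0.1', port='5556'):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PAIR)
    sock.connect('tcp://%s:%s' % (host, port))
    msg = {'type': 'sphere', 'clear': False,
           'translation': [0.0, 0.0, 0.0], 'radius': 1.0,
           'color': (0.2, 0.2, 1.0, 0.8)}
    sock.send(pickle.dumps(msg))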
if __name__ == '__main__':
canvas = Gui()
app.run()
|
upgrade_test.py
|
#!/usr/bin/env python3
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import glob
import os
from pathlib import Path
import platform
import random
import shutil
import stat
import subprocess
import sys
from threading import Thread, Event
import traceback
import time
from urllib import request
import hashlib
from local_cluster import LocalCluster, random_secret_string
SUPPORTED_PLATFORMS = ["x86_64"]
SUPPORTED_VERSIONS = ["7.2.0", "7.1.1", "7.1.0", "7.0.0", "6.3.24", "6.3.23",
"6.3.22", "6.3.18", "6.3.17", "6.3.16", "6.3.15", "6.3.13", "6.3.12", "6.3.9", "6.2.30",
"6.2.29", "6.2.28", "6.2.27", "6.2.26", "6.2.25", "6.2.24", "6.2.23", "6.2.22", "6.2.21",
"6.2.20", "6.2.19", "6.2.18", "6.2.17", "6.2.16", "6.2.15", "6.2.10", "6.1.13", "6.1.12",
"6.1.11", "6.1.10", "6.0.18", "6.0.17", "6.0.16", "6.0.15", "6.0.14", "5.2.8", "5.2.7",
"5.1.7", "5.1.6"]
FDB_DOWNLOAD_ROOT = "https://github.com/apple/foundationdb/releases/download/"
CURRENT_VERSION = "7.2.0"
HEALTH_CHECK_TIMEOUT_SEC = 5
PROGRESS_CHECK_TIMEOUT_SEC = 30
TRANSACTION_RETRY_LIMIT = 100
MAX_DOWNLOAD_ATTEMPTS = 5
RUN_WITH_GDB = False
def make_executable(path):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def remove_file_no_fail(filename):
try:
os.remove(filename)
except OSError:
pass
def version_from_str(ver_str):
ver = [int(s) for s in ver_str.split(".")]
assert len(ver) == 3, "Invalid version string {}".format(ver_str)
return ver
def api_version_from_str(ver_str):
ver_tuple = version_from_str(ver_str)
return ver_tuple[0]*100+ver_tuple[1]*10
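# Worked example (illustration): version_from_str("7.1.0") -> [7, 1, 0], so
# api_version_from_str("7.1.0") -> 7 * 100 + 1 * 10 = 710, i.e. the API
# version drops the patch component.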
def version_before(ver_str1, ver_str2):
return version_from_str(ver_str1) < version_from_str(ver_str2)
def random_sleep(minSec, maxSec):
timeSec = random.uniform(minSec, maxSec)
print("Sleeping for {0:.3f}s".format(timeSec))
time.sleep(timeSec)
def compute_sha256(filename):
hash = hashlib.sha256()
with open(filename, 'rb') as f:
while True:
data = f.read(128*1024)
if not data:
break
hash.update(data)
return hash.hexdigest()
def read_to_str(filename):
with open(filename, 'r') as f:
return f.read()
class UpgradeTest:
def __init__(self, build_dir: str, upgrade_path: list, process_number: int = 1, port: str = None):
self.build_dir = Path(build_dir).resolve()
assert self.build_dir.exists(), "{} does not exist".format(build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(build_dir)
self.upgrade_path = upgrade_path
for version in upgrade_path:
assert version in SUPPORTED_VERSIONS, "Unsupported version {}".format(
version)
self.platform = platform.machine()
assert self.platform in SUPPORTED_PLATFORMS, "Unsupported platform {}".format(
self.platform)
self.tmp_dir = self.build_dir.joinpath(
"tmp",
random_secret_string(16)
)
self.tmp_dir.mkdir(parents=True)
self.download_dir = self.build_dir.joinpath(
"tmp",
"old_binaries"
)
self.download_old_binaries()
self.create_external_lib_dir()
init_version = upgrade_path[0]
self.cluster = LocalCluster(
self.tmp_dir,
self.binary_path(init_version, "fdbserver"),
self.binary_path(init_version, "fdbmonitor"),
self.binary_path(init_version, "fdbcli"),
process_number,
port=port,
create_config=False
)
self.cluster.create_cluster_file()
self.configure_version(init_version)
self.log = self.cluster.log
self.etc = self.cluster.etc
self.data = self.cluster.data
self.input_pipe_path = self.tmp_dir.joinpath(
"input.{}".format(random_secret_string(8)))
self.output_pipe_path = self.tmp_dir.joinpath(
"output.{}".format(random_secret_string(8)))
os.mkfifo(self.input_pipe_path)
os.mkfifo(self.output_pipe_path)
self.progress_event = Event()
def binary_path(self, version, bin_name):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("bin", bin_name)
else:
return self.download_dir.joinpath(version, bin_name)
def lib_dir(self, version):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("lib")
else:
return self.download_dir.joinpath(version)
# Download an old binary of a given version from a remote repository
def download_old_binary(self, version, target_bin_name, remote_bin_name, makeExecutable):
local_file = self.binary_path(version, target_bin_name)
if (local_file.exists()):
return
# Download to a temporary file and then replace the target file atomically
# to avoid consistency errors in case multiple tests are downloading the
# same file in parallel
local_file_tmp = Path("{}.{}".format(
str(local_file), random_secret_string(8)))
self.download_dir.joinpath(version).mkdir(
parents=True, exist_ok=True)
remote_file = "{}{}/{}".format(FDB_DOWNLOAD_ROOT,
version, remote_bin_name)
remote_sha256 = "{}.sha256".format(remote_file)
local_sha256 = Path("{}.sha256".format(local_file_tmp))
for attempt_cnt in range(MAX_DOWNLOAD_ATTEMPTS):
print("Downloading '{}' to '{}'...".format(
remote_file, local_file_tmp))
request.urlretrieve(remote_file, local_file_tmp)
print("Downloading '{}' to '{}'...".format(
remote_sha256, local_sha256))
request.urlretrieve(remote_sha256, local_sha256)
print("Download complete")
assert local_file_tmp.exists(), "{} does not exist".format(local_file_tmp)
assert local_sha256.exists(), "{} does not exist".format(local_sha256)
expected_checksum = read_to_str(local_sha256)
actual_checksum = compute_sha256(local_file_tmp)
if (expected_checksum == actual_checksum):
print("Checksum OK")
break
print("Checksum mismatch. Expected: {} Actual: {}".format(
expected_checksum, actual_checksum))
if attempt_cnt == MAX_DOWNLOAD_ATTEMPTS-1:
assert False, "Failed to download {} after {} attempts".format(
local_file_tmp, MAX_DOWNLOAD_ATTEMPTS)
os.rename(local_file_tmp, local_file)
os.remove(local_sha256)
if makeExecutable:
make_executable(local_file)
# Download all old binaries required for testing the specified upgrade path
def download_old_binaries(self):
for version in self.upgrade_path:
if version == CURRENT_VERSION:
continue
self.download_old_binary(version,
"fdbserver", "fdbserver.{}".format(self.platform), True)
self.download_old_binary(version,
"fdbmonitor", "fdbmonitor.{}".format(self.platform), True)
self.download_old_binary(version,
"fdbcli", "fdbcli.{}".format(self.platform), True)
self.download_old_binary(version,
"libfdb_c.so", "libfdb_c.{}.so".format(self.platform), False)
# Create a directory for external client libraries for the multi-version client (MVC) and fill it
# with the libraries necessary for the specified upgrade path
def create_external_lib_dir(self):
self.external_lib_dir = self.tmp_dir.joinpath("client_libs")
self.external_lib_dir.mkdir(parents=True)
for version in self.upgrade_path:
src_file_path = self.lib_dir(version).joinpath("libfdb_c.so")
assert src_file_path.exists(), "{} does not exist".format(src_file_path)
target_file_path = self.external_lib_dir.joinpath(
"libfdb_c.{}.so".format(version))
shutil.copyfile(src_file_path, target_file_path)
# Perform a health check of the cluster: Use fdbcli status command to check if the number of
# server processes and their versions are as expected
def health_check(self, timeout_sec=HEALTH_CHECK_TIMEOUT_SEC):
retries = 0
while retries < timeout_sec:
retries += 1
status = self.cluster.get_status()
if not "processes" in status["cluster"]:
print("Health check: no processes found. Retrying")
time.sleep(1)
continue
num_proc = len(status["cluster"]["processes"])
if (num_proc < self.cluster.process_number):
print("Health check: {} of {} processes found. Retrying".format(
num_proc, self.cluster.process_number))
time.sleep(1)
continue
assert num_proc == self.cluster.process_number, "Number of processes: expected: {}, actual: {}".format(
self.cluster.process_number, num_proc)
for (_, proc_stat) in status["cluster"]["processes"].items():
proc_ver = proc_stat["version"]
assert proc_ver == self.cluster_version, "Process version: expected: {}, actual: {}".format(
self.cluster_version, proc_ver)
print("Health check: OK")
return
assert False, "Health check: Failed"
# Create and save a cluster configuration for the given version
def configure_version(self, version):
self.cluster.fdbmonitor_binary = self.binary_path(
version, "fdbmonitor")
self.cluster.fdbserver_binary = self.binary_path(version, "fdbserver")
self.cluster.fdbcli_binary = self.binary_path(version, "fdbcli")
self.cluster.set_env_var = "LD_LIBRARY_PATH", self.lib_dir(version)
if (version_before(version, "7.1.0")):
self.cluster.use_legacy_conf_syntax = True
self.cluster.save_config()
self.cluster_version = version
# Upgrade the cluster to the given version
def upgrade_to(self, version):
print("Upgrading to version {}".format(version))
self.cluster.stop_cluster()
self.configure_version(version)
self.cluster.ensure_ports_released()
self.cluster.start_cluster()
print("Upgraded to {}".format(version))
def __enter__(self):
print("Starting cluster version {}".format(self.cluster_version))
self.cluster.start_cluster()
self.cluster.create_database(enable_tenants=False)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.cluster.stop_cluster()
shutil.rmtree(self.tmp_dir)
# Determine FDB API version matching the upgrade path
def determine_api_version(self):
self.api_version = api_version_from_str(CURRENT_VERSION)
for version in self.upgrade_path:
self.api_version = min(
api_version_from_str(version), self.api_version)
# Start the tester to generate the workload specified by the test file
def exec_workload(self, test_file):
self.tester_retcode = 1
try:
self.determine_api_version()
cmd_args = [self.tester_bin,
'--cluster-file', self.cluster.cluster_file,
'--test-file', test_file,
'--external-client-dir', self.external_lib_dir,
'--disable-local-client',
'--input-pipe', self.input_pipe_path,
'--output-pipe', self.output_pipe_path,
'--api-version', str(self.api_version),
'--log',
'--log-dir', self.log,
'--tmp-dir', self.tmp_dir,
'--transaction-retry-limit', str(TRANSACTION_RETRY_LIMIT)]
if (RUN_WITH_GDB):
cmd_args = ['gdb', '-ex', 'run', '--args'] + cmd_args
print("Executing test command: {}".format(
" ".join([str(c) for c in cmd_args])))
self.tester_proc = subprocess.Popen(
cmd_args, stdout=sys.stdout, stderr=sys.stderr)
self.tester_retcode = self.tester_proc.wait()
self.tester_proc = None
if (self.tester_retcode != 0):
print("Tester failed with return code {}".format(
self.tester_retcode))
except Exception:
print("Execution of test workload failed")
print(traceback.format_exc())
# Perform a progress check: Trigger it and wait until it is completed
def progress_check(self, ctrl_pipe):
self.progress_event.clear()
os.write(ctrl_pipe, b"CHECK\n")
self.progress_event.wait(
None if RUN_WITH_GDB else PROGRESS_CHECK_TIMEOUT_SEC)
if (self.progress_event.is_set()):
print("Progress check: OK")
else:
assert False, "Progress check failed after upgrade to version {}".format(
self.cluster_version)
# The main function of a thread for reading and processing
# the notifications received from the tester
def output_pipe_reader(self):
try:
print("Opening pipe {} for reading".format(self.output_pipe_path))
self.output_pipe = open(self.output_pipe_path, 'r')
for line in self.output_pipe:
msg = line.strip()
print("Received {}".format(msg))
if (msg == "CHECK_OK"):
self.progress_event.set()
self.output_pipe.close()
except Exception as e:
print("Error while reading output pipe", e)
print(traceback.format_exc())
# Execute the upgrade test workflow according to the specified
# upgrade path: perform the upgrade steps and check success after each step
def exec_upgrade_test(self):
print("Opening pipe {} for writing".format(self.input_pipe_path))
ctrl_pipe = os.open(self.input_pipe_path, os.O_WRONLY)
try:
self.health_check()
self.progress_check(ctrl_pipe)
for version in self.upgrade_path[1:]:
random_sleep(0.0, 2.0)
self.upgrade_to(version)
self.health_check()
self.progress_check(ctrl_pipe)
os.write(ctrl_pipe, b"STOP\n")
finally:
os.close(ctrl_pipe)
# Kill the tester process if it is still alive
def kill_tester_if_alive(self, workload_thread):
if not workload_thread.is_alive():
return
if self.tester_proc is not None:
try:
print("Killing the tester process")
self.tester_proc.kill()
workload_thread.join(5)
except:
print("Failed to kill the tester process")
# The main method implementing the test:
# - Start a thread for generating the workload using a tester binary
# - Start a thread for reading notifications from the tester
# - Trigger the upgrade steps and checks in the main thread
def exec_test(self, args):
self.tester_bin = self.build_dir.joinpath("bin", "fdb_c_api_tester")
assert self.tester_bin.exists(), "{} does not exist".format(self.tester_bin)
self.tester_proc = None
test_retcode = 1
try:
workload_thread = Thread(
target=self.exec_workload, args=(args.test_file,))
workload_thread.start()
reader_thread = Thread(target=self.output_pipe_reader)
reader_thread.start()
self.exec_upgrade_test()
test_retcode = 0
except Exception:
print("Upgrade test failed")
print(traceback.format_exc())
self.kill_tester_if_alive(workload_thread)
finally:
workload_thread.join(5)
reader_thread.join(5)
self.kill_tester_if_alive(workload_thread)
if test_retcode == 0:
test_retcode = self.tester_retcode
return test_retcode
def grep_logs_for_events(self, severity):
return (
subprocess.getoutput(
"grep -r 'Severity=\"{}\"' {}".format(
severity,
self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
# Check the cluster log for errors
def check_cluster_logs(self, error_limit=100):
sev40s = (
subprocess.getoutput(
"grep -r 'Severity=\"40\"' {}".format(
self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
err_cnt = 0
for line in sev40s:
# When running ASAN we expect to see this message. Boost coroutine should be using the
# correct asan annotations so that it shouldn't produce any false positives.
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!"
):
continue
if (err_cnt < error_limit):
print(line)
err_cnt += 1
if err_cnt > 0:
            print(">>>>>>>>>>>>>>>>>>>> Found {} severity 40 events - the test fails".format(err_cnt))
else:
print("No errors found in logs")
return err_cnt == 0
# Check the server and client logs for warnings and dump them
def dump_warnings_in_logs(self, limit=100):
sev30s = (
subprocess.getoutput(
"grep -r 'Severity=\"30\"' {}".format(
self.cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
if (len(sev30s) == 0):
print("No warnings found in logs")
else:
print(">>>>>>>>>>>>>>>>>>>> Found {} severity 30 events (warnings):".format(
len(sev30s)))
for line in sev30s[:limit]:
print(line)
# Dump the last cluster configuration and cluster logs
def dump_cluster_logs(self):
for etc_file in glob.glob(os.path.join(self.cluster.etc, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(etc_file))
with open(etc_file, "r") as f:
print(f.read())
for log_file in glob.glob(os.path.join(self.cluster.log, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(log_file))
with open(log_file, "r") as f:
print(f.read())
if __name__ == "__main__":
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
A script for testing FDB multi-version client in upgrade scenarios. Creates a local cluster,
generates a workload using fdb_c_api_tester with a specified test file, and performs
cluster upgrade according to the specified upgrade path. Checks if the workload successfully
progresses after each upgrade step.
""",
)
parser.add_argument(
"--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build directory",
required=True,
)
parser.add_argument(
'--upgrade-path',
nargs='+',
help='Cluster upgrade path: a space separated list of versions',
default=[CURRENT_VERSION]
)
parser.add_argument(
'--test-file',
help='A .toml file describing a test workload to be generated with fdb_c_api_tester',
required=True,
)
parser.add_argument(
"--process-number",
"-p",
help="Number of fdb processes running (default: 0 - random)",
type=int,
default=0,
)
parser.add_argument(
'--disable-log-dump',
help='Do not dump cluster log on error',
action="store_true"
)
parser.add_argument(
'--run-with-gdb',
help='Execute the tester binary from gdb',
action="store_true"
)
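# Example invocation (a hypothetical sketch; the script name, build path,
# upgrade-path versions and test file below are placeholders, not values taken
# from this repository):
#
#   python3 upgrade_test.py --build-dir ./build \
#       --upgrade-path 6.3.24 7.0.0 7.1.0 \
#       --test-file tests/upgrade_workload.toml \
#       --process-number 3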
args = parser.parse_args()
if (args.process_number == 0):
args.process_number = random.randint(1, 5)
print("Testing with {} processes".format(args.process_number))
if (args.run_with_gdb):
RUN_WITH_GDB = True
errcode = 1
with UpgradeTest(args.build_dir, args.upgrade_path, args.process_number) as test:
print("log-dir: {}".format(test.log))
print("etc-dir: {}".format(test.etc))
print("data-dir: {}".format(test.data))
print("cluster-file: {}".format(test.etc.joinpath("fdb.cluster")))
errcode = test.exec_test(args)
if not test.check_cluster_logs():
errcode = 1 if errcode == 0 else errcode
test.dump_warnings_in_logs()
if errcode != 0 and not args.disable_log_dump:
test.dump_cluster_logs()
sys.exit(errcode)
|
tests.py
|
# -*- coding: utf-8 -*-
import os
import shutil
import sys
import tempfile
import time
import unittest
from cStringIO import StringIO
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.files.base import ContentFile
from django.core.files.images import get_image_dimensions
from django.core.files.storage import FileSystemStorage
from django.core.files.uploadedfile import UploadedFile
from unittest import TestCase
try:
import threading
except ImportError:
import dummy_threading as threading
try:
# Checking for the existence of Image is enough for CPython, but
# for PyPy, you need to check for the underlying modules
from PIL import Image, _imaging
except ImportError:
Image = None
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mktemp()
os.makedirs(self.temp_dir)
self.storage = self.storage_class(location=self.temp_dir)
def tearDown(self):
os.rmdir(self.temp_dir)
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.failIf(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assert_(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.failIf(self.storage.exists('storage_test'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class UnicodeFileNameTests(unittest.TestCase):
def test_unicode_file_names(self):
"""
        Regression test for #8156: files with unicode names. I can't quite figure
out the encoding situation between doctest and this file, but the actual
repr doesn't matter; it just shouldn't return a unicode object.
"""
uf = UploadedFile(name=u'¿Cómo?',content_type='text')
self.assertEqual(type(uf.__repr__()), str)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile("Data"))
def test_race_condition(self):
self.thread.start()
name = self.save_file('conflict')
self.thread.join()
self.assert_(self.storage.exists('conflict'))
self.assert_(self.storage.exists('conflict_'))
self.storage.delete('conflict')
self.storage.delete('conflict_')
class FileStoragePermissions(TestCase):
def setUp(self):
self.old_perms = settings.FILE_UPLOAD_PERMISSIONS
settings.FILE_UPLOAD_PERMISSIONS = 0666
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
settings.FILE_UPLOAD_PERMISSIONS = self.old_perms
shutil.rmtree(self.storage_dir)
def test_file_upload_permissions(self):
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0777
self.assertEqual(actual_mode, 0666)
class FileStoragePathParsing(TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_')))
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
# Before 2.6, a leading dot was treated as an extension, and so
# underscore gets added to beginning instead of end.
if sys.version_info < (2, 6):
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/_.test')))
else:
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_')))
if Image is not None:
class DimensionClosingBug(TestCase):
"""
Test that get_image_dimensions() properly closes files (#8817)
"""
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = StringIO()
try:
get_image_dimensions(empty_io)
finally:
self.assert_(not empty_io.closed)
def test_closing_of_filenames(self):
"""
            get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
            # called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper(object):
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
from django.core.files import images
images.open = catching_open
try:
get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png"))
finally:
del images.open
self.assert_(FileWrapper._closed)
|
loader.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# --------------------------------------------------------------------------
import bisect
import os
import sys
from collections import defaultdict
from typing import List, Tuple
from .. import consts, io, utils
from ..multiprocessing import Process, Queue
from ..run import Run, RunProfile
from .data import DistributedRunProfileData, RunProfileData
from .node import CommunicationNode
from .run_generator import DistributedRunGenerator, RunGenerator
logger = utils.get_logger()
class RunLoader(object):
def __init__(self, name, run_dir, caches: io.Cache):
self.run_name = name
self.run_dir = run_dir
self.caches = caches
self.queue = Queue()
def load(self):
workers = []
spans_by_workers = defaultdict(list)
for path in io.listdir(self.run_dir):
if io.isdir(io.join(self.run_dir, path)):
continue
match = consts.WORKER_PATTERN.match(path)
if not match:
continue
worker = match.group(1)
span = match.group(2)
if span is not None:
# remove the starting dot (.)
span = span[1:]
bisect.insort(spans_by_workers[worker], span)
workers.append((worker, span, path))
span_index_map = {}
for worker, span_array in spans_by_workers.items():
for i, span in enumerate(span_array, 1):
span_index_map[(worker, span)] = i
for worker, span, path in workers:
# convert the span timestamp to the index.
span_index = None if span is None else span_index_map[(worker, span)]
p = Process(target=self._process_data, args=(worker, span_index, path))
p.start()
logger.info('started all processing')
distributed_run = Run(self.run_name, self.run_dir)
run = Run(self.run_name, self.run_dir)
num_items = len(workers)
while num_items > 0:
item: Tuple[RunProfile, DistributedRunProfileData] = self.queue.get()
num_items -= 1
r, d = item
if r or d:
logger.debug('Loaded profile via mp.Queue')
if r is not None:
run.add_profile(r)
if d is not None:
distributed_run.add_profile(d)
distributed_profiles = self._process_spans(distributed_run)
for d in distributed_profiles:
if d is not None:
run.add_profile(d)
        # The worker processes are non-daemon, so there is no need to join them explicitly here.
return run
def _process_data(self, worker, span, path):
import absl.logging
absl.logging.use_absl_handler()
try:
logger.debug('Parse trace, run_dir=%s, worker=%s', self.run_dir, path)
local_file = self.caches.get_remote_cache(io.join(self.run_dir, path))
data = RunProfileData.parse(worker, span, local_file, self.caches.cache_dir)
if data.trace_file_path != local_file:
self.caches.add_file(local_file, data.trace_file_path)
generator = RunGenerator(worker, span, data)
profile = generator.generate_run_profile()
dist_data = DistributedRunProfileData(data)
logger.debug('Sending back profile via mp.Queue')
self.queue.put((profile, dist_data))
except KeyboardInterrupt:
logger.warning('tb_plugin receive keyboard interrupt signal, process %d will exit' % (os.getpid()))
sys.exit(1)
except Exception as ex:
logger.warning('Failed to parse profile data for Run %s on %s. Exception=%s',
self.run_name, worker, ex, exc_info=True)
self.queue.put((None, None))
logger.debug('finishing process data')
def _process_spans(self, distributed_run: Run):
spans = distributed_run.get_spans()
if spans is None:
return [self._process_distributed_profiles(distributed_run.get_profiles(), None)]
else:
span_profiles = []
for span in spans:
profiles = distributed_run.get_profiles(span=span)
p = self._process_distributed_profiles(profiles, span)
if p is not None:
span_profiles.append(p)
return span_profiles
def _process_distributed_profiles(self, profiles: List[DistributedRunProfileData], span):
has_communication = True
comm_node_lists: List[List[CommunicationNode]] = []
for data in profiles:
logger.debug('Processing profile data')
# Set has_communication to False and disable distributed view if any one worker has no communication
if data.has_communication and data.comm_node_list:
comm_node_lists.append(data.comm_node_list)
if len(comm_node_lists[-1]) != len(comm_node_lists[0]):
logger.error("Number of communication operation nodes don't match between workers in run: %s"
% self.run_name)
has_communication = False
else:
has_communication = False
logger.debug('Processing profile data finish')
if not has_communication:
logger.debug('There is no communication profile in this run.')
return None
worker_num = len(comm_node_lists)
for i, node in enumerate(comm_node_lists[0]):
kernel_range_size = len(node.kernel_ranges)
# loop for all communication kernel ranges in order
for j in range(kernel_range_size):
min_range = sys.maxsize
                # For each kernel_range, find the minimum across workers as the real communication time
for k in range(worker_num):
kernel_ranges = comm_node_lists[k][i].kernel_ranges
if len(kernel_ranges) != kernel_range_size:
logger.error("Number of communication kernels don't match between workers in run: %s"
% self.run_name)
has_communication = False
return None
if kernel_ranges:
if kernel_ranges[j][1] - kernel_ranges[j][0] < min_range:
min_range = kernel_ranges[j][1] - kernel_ranges[j][0]
for k in range(worker_num):
kernel_range = comm_node_lists[k][i].kernel_ranges[j]
comm_node_lists[k][i].real_time_ranges.append((kernel_range[1] - min_range, kernel_range[1]))
for data in profiles:
data.communication_parse()
generator = DistributedRunGenerator(profiles, span)
profile = generator.generate_run_profile()
return profile
|
processV0.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import multiprocessing
"""
# Resize source images
python tools/process.py --input_dir photos/original --operation resize --output_dir photos/resized
# Create images with blank centers
python tools/process.py --input_dir photos/resized --operation blank --output_dir photos/blank
# Combine resized images with blanked images (A-B image pairs)
python tools/process.py --input_dir photos/color_tmp --b_dir photos/line_tmp --operation combine --output_dir photos
# Split into train/val set
python tools/split.py --dir photos/combined
# edge needs caffe, not working (yet)
"""
edge_pool = None
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="output path")
parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges"])
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation")
a = parser.parse_args()
def resize(src):
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def blank(src):
height, width, _ = src.shape
if height != width:
raise Exception("non-square image")
image_size = width
size = int(image_size * 0.3)
offset = int(image_size / 2 - size / 2)
dst = src
dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])
return dst
def combine(src, src_path):
if a.b_dir is None:
raise Exception("missing b_dir")
# find corresponding file in b_dir, could have a different extension
basename, _ = os.path.splitext(os.path.basename(src_path))
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
if os.path.exists(sibling_path):
sibling = im.load(sibling_path)
break
else:
raise Exception("could not find sibling image for " + src_path)
# make sure that dimensions are correct
height, width, _ = src.shape
if height != sibling.shape[0] or width != sibling.shape[1]:
raise Exception("differing sizes")
# convert both images to RGB if necessary
if src.shape[2] == 1:
src = im.grayscale_to_rgb(images=src)
if sibling.shape[2] == 1:
sibling = im.grayscale_to_rgb(images=sibling)
# remove alpha channel
if src.shape[2] == 4:
src = src[:,:,:3]
if sibling.shape[2] == 4:
sibling = sibling[:,:,:3]
return np.concatenate([src, sibling], axis=1)
def grayscale(src):
return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
net = None
def run_caffe(src):
# lazy load caffe and create net
global net
if net is None:
# don't require caffe unless we are doing edge detection
os.environ["GLOG_minloglevel"] = "2" # disable logging from caffe
import caffe
# using this requires using the docker image or assembling a bunch of dependencies
# and then changing these hardcoded paths
net = caffe.Net("/opt/caffe/examples/hed/deploy.prototxt", "/opt/caffe/hed_pretrained_bsds.caffemodel", caffe.TEST)
net.blobs["data"].reshape(1, *src.shape)
net.blobs["data"].data[...] = src
net.forward()
return net.blobs["sigmoid-fuse"].data[0][0,:,:]
def edges(src):
# based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py
# and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m
import scipy.io
src = src * 255
border = 128 # put a padding around images since edge detection seems to detect edge of image
src = src[:,:,:3] # remove alpha channel if present
src = np.pad(src, ((border, border), (border, border), (0,0)), "reflect")
src = src[:,:,::-1]
src -= np.array((104.00698793,116.66876762,122.67891434))
src = src.transpose((2, 0, 1))
# [height, width, channels] => [batch, channel, height, width]
fuse = edge_pool.apply(run_caffe, [src])
fuse = fuse[border:-border, border:-border]
with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file:
scipy.io.savemat(mat_file.name, {"input": fuse})
octave_code = r"""
E = 1-load(input_path).input;
E = imresize(E, [image_width,image_width]);
E = 1 - E;
E = single(E);
[Ox, Oy] = gradient(convTri(E, 4), 1);
[Oxx, ~] = gradient(Ox, 1);
[Oxy, Oyy] = gradient(Oy, 1);
O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi);
E = edgesNmsMex(E, O, 1, 5, 1.01, 1);
E = double(E >= max(eps, threshold));
E = bwmorph(E, 'thin', inf);
E = bwareaopen(E, small_edge);
E = 1 - E;
E = uint8(E * 255);
imwrite(E, output_path);
"""
config = dict(
input_path="'%s'" % mat_file.name,
output_path="'%s'" % png_file.name,
# image_width=256,
image_width = a.size, # yw notes
threshold=25.0/255.0,
small_edge=5,
)
args = ["octave"]
for k, v in config.items():
args.extend(["--eval", "%s=%s;" % (k, v)])
args.extend(["--eval", octave_code])
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("octave failed")
print("returncode:", e.returncode)
print("output:", e.output)
raise
return im.load(png_file.name)
def process(src_path, dst_path):
src = im.load(src_path)
if a.operation == "grayscale":
dst = grayscale(src)
elif a.operation == "resize":
dst = resize(src)
elif a.operation == "blank":
dst = blank(src)
elif a.operation == "combine":
dst = combine(src, src_path)
elif a.operation == "edges":
dst = edges(src)
else:
raise Exception("invalid operation")
im.save(dst, dst_path)
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main():
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
src_paths = []
dst_paths = []
skipped = 0
for src_path in im.find(a.input_dir):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path = os.path.join(a.output_dir, name + ".png")
if os.path.exists(dst_path):
skipped += 1
else:
src_paths.append(src_path)
dst_paths.append(dst_path)
print("skipping %d files that already exist" % skipped)
global total
total = len(src_paths)
print("processing %d files" % total)
global start
start = time.time()
if a.operation == "edges":
# use a multiprocessing pool for this operation so it can use multiple CPUs
# create the pool before we launch processing threads
global edge_pool
edge_pool = multiprocessing.Pool(a.workers)
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path in zip(src_paths, dst_paths):
process(src_path, dst_path)
complete()
else:
queue = tf.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1)
dequeue_op = queue.dequeue()
def worker(coord):
with sess.as_default():
while not coord.should_stop():
try:
src_path, dst_path = sess.run(dequeue_op)
except tf.errors.OutOfRangeError:
coord.request_stop()
break
process(src_path, dst_path)
complete()
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
threads.append(t)
try:
coord.join(threads)
except KeyboardInterrupt:
coord.request_stop()
coord.join(threads)
main()
|
thread_crawl_taobao.py
|
# -*- coding:utf-8 -*-
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq
import requests
import time
import urllib
import os
import json
import threading
chromeOptions = webdriver.ChromeOptions()
# Set the proxy server
chromeOptions.add_argument("--proxy-server=223.243.5.161:4216")
chromedriver="C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe"
browser = webdriver.Chrome(executable_path=chromedriver, chrome_options=chromeOptions)
print('Please log in by scanning with the Taobao app; do not touch the mouse or keyboard! Please avoid frequent logins (less than one minute apart) to reduce server load.')
wait=WebDriverWait(browser,20)
search_key = '温宿'
data_path = 'F://crawl_data/'
data_dir = data_path + search_key + '/'
def search():
try:
url="https://www.taobao.com"
browser.get(url)
input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "#q")))
submit=wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'#J_TSearchForm > div.search-button > button')))
input.send_keys(search_key)
submit.click()
total=wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'#mainsrp-pager > div > div > div > div.total')))
get_products()
return total.text
except:
search()
def next_page(page_number):
try:
input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.form > input")))
submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit')))
input.clear()
input.send_keys(page_number)
submit.click()
wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR,'#mainsrp-pager > div > div > div > ul > li.item.active > span'),str(page_number)))
get_products()
except:
next_page(page_number)
def get_products():
try:
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))  # wait for the item list to load
except Exception as e:
print(e)
html=browser.page_source
doc=pq(html)
    items = doc('#mainsrp-itemlist .items .item').items()  # get every item element
for item in items:
time.sleep(3)
product={
            'image': 'https:' + item.find('.pic .img').attr('data-src'),  # image URL
            'price': item.find('.price').text(),  # item price
            'deal': item.find('.deal-cnt').text()[:-3],  # number of buyers; [:-3] strips the trailing "人付款" suffix
            'title': item.find('.title').text(),  # item title
            'shop': item.find('.shop').text(),  # shop name
'location':item.find('.location').text(),
'href':'https:' + item.find('.pic .pic-link').attr('href')
}
print(product)
try:
f = open(data_dir + '/data/data.json', 'a', encoding="utf-8")
j = json.dumps(product, ensure_ascii=False)
f.write(str(j) + '\n')
f.close()
except Exception as e:
print(e)
try:
f=requests.get('https:' + item.find('.pic .img').attr('data-src'))
filename = item.find('.title').text()
filename = eval(repr(filename).replace('\\', '-'))
filename = eval(repr(filename).replace('/', '-'))
filename = eval(repr(filename).replace('*', 'x'))
filename = eval(repr(filename).replace('?', ''))
filename = eval(repr(filename).replace('>', ''))
filename = eval(repr(filename).replace('<', ''))
filename = eval(repr(filename).replace('|', ''))
filename = eval(repr(filename).replace(',', ' '))
filename = eval(repr(filename).replace('"', ''))
filename = eval(repr(filename).replace(':', ''))
filename = eval(repr(filename).replace(';', ' '))
filename = eval(repr(filename).replace(';', ' '))
filename = data_dir + '/img/' + filename + '.jpg'
if not os.path.exists(filename):
with open(filename, "wb") as code:
code.write(f.content)
except Exception as e:
print(e)
def run(n, semaphore):
    semaphore.acquire()  # acquire the semaphore
time.sleep(1)
print("run the thread:%s\n" % n)
next_page(n)
    semaphore.release()  # release the semaphore
def main():
start_time = time.time()
    # Create the data directories if they do not exist
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if not os.path.exists(data_dir + '/img/'):
os.mkdir(data_dir + '/img/')
if not os.path.exists(data_dir + '/data/'):
os.mkdir(data_dir + '/data/')
total=search()
print(total)
    total = int(re.compile(r'(\d+)').search(total).group(1))  # convert to an integer
    semaphore = threading.BoundedSemaphore(5)  # allow at most 5 threads to run concurrently
for i in range(2,total+1):
t = threading.Thread(target=run, args=(i, semaphore))
t.start()
while threading.active_count() != 1:
pass
else:
print('-----all threads done-----')
end_time = time.time()
print("Execution Time: %.2f sec" %(end_time - start_time))
if __name__=='__main__':
main()
|
pgc01.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2019 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import ctypes
import gc
import json
import os
import sys
import threading
def child(pipe_name):
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
objlist = [id(o) for o in gc.garbage]
del gc.garbage[:]
fdwrite = os.open(pipe_name, os.O_WRONLY)
os.write(fdwrite, json.dumps(objlist))
os.close(fdwrite)
sys.exit(-1)
def parent(pipe_name):
fdread = open(pipe_name, 'r')
objlist = json.loads(fdread.read())
for objid in objlist:
o = ctypes.cast(objid, ctypes.py_object).value
print '********* collect garbage object: ', o
if isinstance(o, list):
del o[:]
elif isinstance(o, dict):
o.clear()
else:
print o
del objlist[:]
fdread.close()
os.remove(pipe_name)
def collect():
gc.disable()
pipe_name = 'fifo_%s#%s' % (
os.path.splitext(os.path.basename(sys.argv[0]))[0], os.getpid())
if os.path.exists(pipe_name):
os.unlink(pipe_name)
os.mkfifo(pipe_name)
pid = os.fork()
if pid == 0:
child(pipe_name)
t = threading.Thread(target=parent, args=(pipe_name,))
t.start()
t.join()
def main():
l = []
l.append(l)
d = {}
d[1] = l
d[2] = d
del l
del d
collect()
if __name__ == '__main__':
main()
|
fifo.py
|
import threading
import time
import random
resultados = []
tiempos = []
tiempoEspera = 0
mutexActivo = threading.Semaphore(1)
torniquete = threading.Semaphore()
def ejecucion(idProceso):
global tiempoEspera
if(tiempoEspera == 0):
resultados[2][idProceso] = tiempoEspera
resultados[1][idProceso] = tiempos[2][idProceso]
tiempoEspera = tiempos[2][idProceso] + tiempos[1][idProceso]
else:
aux = tiempoEspera - tiempos[1][idProceso]
resultados[2][idProceso] = round(aux,2)
resultados[1][idProceso] = round(resultados[2][idProceso] + tiempos[2][idProceso],2)
tiempoEspera += tiempos[2][idProceso]
resultados[3][idProceso] = round(float(tiempos[2][idProceso] / resultados[1][idProceso]),2)
resultados[4][idProceso] = round(float(resultados[1][idProceso] / tiempos[2][idProceso]),2)
def proceso(idProceso):
global tiempos
torniquete.release()
torniquete.acquire()
time.sleep(tiempos[1][idProceso])
torniquete.release()
mutexActivo.acquire()
ejecucion(idProceso)
mutexActivo.release()
def lanza_hilos():
for i in range(5):
threading.Thread(target=proceso, args=[i]).start()
def ff(ti, res):
global tiempos
global resultados
resultados = res
tiempos = ti
lanza_hilos()
time.sleep(15)
print("FCFS")
for i in range(len(resultados)):
print("Proceso: %s -> T:%d E:%d P:%d R:%d" %(resultados[0][i],resultados[1][i],resultados[2][i],resultados[3][i],resultados[4][i]))
|
mcp23x17.py
|
"""mcp23x17.py"""
import time
import warnings
from threading import Thread
from abc import abstractmethod, ABCMeta
from .devices import Device
class MCP23x17(Device):
"""Class representing mcp23x17 chips.
Steps for interrupts:
- Enable interrupt on pin through GPINTEN register
- Define which type of signal will cause the interrupt through registers
INTCON and DEFVAL
    - When an interrupt occurs, the bit in the INT register is set.
    - The interrupt bit remains active until the intcap register (the value
      of gpio when the interrupt occurred) or the gpio register is read.
    - The first interrupt event causes the port contents to be copied into
      the INTCAP register. Subsequent interrupt conditions on the port
      will not cause an interrupt to occur as long as the interrupt is
      not cleared by a read of INTCAP or GPIO.
    Seq and banks:
    The module supports byte mode, which doesn't increment the register
    counter. If byte mode is enabled and bank is 0, then the counter toggles
    between the two registers A and B.
"""
def __init__(self):
super(MCP23x17, self).__init__(name="", max_data_length=0)
self._set_registers(0)
self._debounce = {} # Dictionary with debounce time for pins
self._int_handlers = {} # Dictionary with int handling function for pins
self._poll_async = False
self._poll_flag = False
def set_pin_debounce(self, pin_num, value):
"""Set the debounce time for a pin.
Args:
pin_num (str): The pin number, it must be in the form of A_x or
B_x.
value (int): The debounce time in ms.
Raises:
TypeError: Error when the type of value is not int.
"""
if not isinstance(value, int):
raise TypeError("Wrong value type, should be int")
# Call it for the type checking
_ = self._get_chunk_number(pin_num)
self._debounce[pin_num] = value * 1e-3
def set_int_handl_func(self, pin_num, func, *args):
"""Set interrupt handling function for a pin
Args:
pin_num (str): The pin number, it must be in the form of A_x or
B_x.
func: The function to be called when the interrupt occur.
*args: The arguments of func.
"""
def caller():
t_now = time.time()
limit = self._debounce[pin_num]
if t_now - caller.t_s > limit:
func(*args)
caller.t_s = t_now
self.get_intcap(pin_num)
caller.t_s = -10000
self._int_handlers[pin_num] = caller
def poll_int_async(self, pin_nums):
"""Async polling of interrupt flags."""
if self._poll_flag:
warnings.warn("Already polling for interrupts")
else:
Thread(target=self.poll_int, args=(pin_nums,)).start()
self._poll_async = True
def stop_poll_int_async(self):
"""Stop async polling"""
if self._poll_flag and self._poll_async:
self._poll_flag = False
# Wait polling thread to exit
while not self._poll_end:
time.sleep(1)
def _set_registers(self, bank):
"""Set the registers address."""
# Registers
if bank:
# They control the direction of pins
self.IODIRA = 0x00
self.IODIRB = 0x10
self.IPOLA = 0x01
self.IPOLB = 0x11
self.GPINTENA = 0x02
self.GPINTENB = 0x12
self.DEFVALA = 0x03
self.DEFVALB = 0x13
self.INTCONA = 0x04
self.INTCONB = 0x14
self.IOCON = 0x05 # This register is shared between the two ports
self.GPPUA = 0x06
self.GPPUB = 0x16
self.INTFA = 0x07
self.INTFB = 0x17
self.INTCAPA = 0x08
self.INTCAPB = 0x18
# Read from GPIOn reads the value on the port, write to them causes a
# write to the latches OLATn
self.GPIOA = 0x09
self.GPIOB = 0x19
self.OLATA = 0x0A
self.OLATB = 0x1A
else:
# They control the direction of pins
self.IODIRA = 0x00
self.IODIRB = 0x01
self.IPOLA = 0x02
self.IPOLB = 0x03
self.GPINTENA = 0x04
self.GPINTENB = 0x05
self.DEFVALA = 0x06
self.DEFVALB = 0x07
self.INTCONA = 0x08
self.INTCONB = 0x09
self.IOCON = 0x0A # This register is shared between the two ports
self.GPPUA = 0x0C
self.GPPUB = 0x0D
self.INTFA = 0x0E
self.INTFB = 0x0F
self.INTCAPA = 0x10
self.INTCAPB = 0x11
# Read from GPIOn reads the value on the port, write to them causes a
# write to the latches OLATn
self.GPIOA = 0x12
self.GPIOB = 0x13
self.OLATA = 0x14
self.OLATB = 0x15
def _get_chunk_number(self, pin_num):
"""Split a string like "A_12" to A and 12.
Args:
pin_num (str): The pin number, it must be in the form of A_x or
B_x.
Returns:
A tuple that has the "chunk" and the number of the pin.
Raises:
TypeError: Error if the pin_num is not a string. Also if in A_x the
x is not int.
            ValueError: Error when in A_x the A part is not 'A' or 'B', or when
                the x part is not a number smaller than 8.
"""
if not isinstance(pin_num, str):
raise TypeError("Wrong type of pin_num, must be str.")
# Check format of pin_num
pin_num = pin_num.split('_')
if len(pin_num) != 2:
raise ValueError("Wrong format of pin_num, should be A_x or B_x")
chunk = pin_num[0]
if chunk != 'A' and chunk != 'B':
raise ValueError("Wrong value of 'chunk', must be A or B")
if pin_num[1].isnumeric():
number = int(pin_num[1])
if number > 7:
raise ValueError("Wrong pin number, it must be [0, 7]")
else:
raise TypeError("Wrong type of A_x, x must be int")
return chunk, number
def set_pin_dir(self, pin_num, function):
"""Set pin direction
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
function: Boolean, it could be 1 for input and 0 for output.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.IODIRA if chunk == 'A' else self.IODIRB
self._set_bit_register(address, pin_num+1, int(function))
def get_pin_dir(self, pin_num):
"""Get pin direction
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.IODIRA if chunk == 'A' else self.IODIRB
return self._get_bit_register(address, pin_num+1)
def set_pin_pol(self, pin_num, polarity):
"""Set pin polarity
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
polarity (boolean): It could be 1 for reverse and 0 for same.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.IPOLA if chunk == 'A' else self.IPOLB
self._set_bit_register(address, pin_num+1, int(polarity))
def get_pin_pol(self, pin_num):
"""Get pin polarity
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
An integer indicating the polarity of the pin. 0 is for same and
1 for reverse.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.IPOLA if chunk == 'A' else self.IPOLB
return self._get_bit_register(address, pin_num+1)
def set_pin_int(self, pin_num, interrupt):
"""Set pin interrupt on change.
In order to work the DEFVAL and INTCON registers must be set.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
interrupt: Boolean representing the interrupt status of the pin.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.GPINTENA if chunk == 'A' else self.GPINTENB
self._set_bit_register(address, pin_num+1, int(interrupt))
def get_pin_int(self, pin_num):
"""Get pin interrupt on change.
In order to work the DEFVAL and INTCON registers must be set.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
An integer indicating the interrupt status of the pin.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.GPINTENA if chunk == 'A' else self.GPINTENB
return self._get_bit_register(address, pin_num+1)
def set_pin_def_val(self, pin_num, def_val):
"""Set pin default value for comparison.
        The value of each bit will be compared with the value of the associated
        pin and if they are different then an interrupt will happen.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
def_val: Int representing the compare value. Should be 0 or 1.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.DEFVALA if chunk == 'A' else self.DEFVALB
self._set_bit_register(address, pin_num+1, int(def_val))
def get_pin_def_val(self, pin_num):
"""Get pin default value for comparison.
        The value of each bit will be compared with the value of the associated
        pin and if they are different then an interrupt will happen.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
Int representing the compare value. Should be 0 or 1.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.DEFVALA if chunk == 'A' else self.DEFVALB
return self._get_bit_register(address, pin_num+1)
def set_pin_intcon(self, pin_num, value):
"""Set pin intcon value.
        If the corresponding pin's bit is set, then the value is compared with
        the associated bit in the DEFVAL register. Otherwise it is compared
        against the previous value.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
value: Int representing the value. Should be 0 or 1.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.INTCONA if chunk == 'A' else self.INTCONB
self._set_bit_register(address, pin_num+1, value)
def get_pin_intcon(self, pin_num):
"""Get pin intcon value.
        If the corresponding pin's bit is set, then the value is compared with
        the associated bit in the DEFVAL register. Otherwise it is compared
        against the previous value.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
Int representing the value. Should be 0 or 1.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.INTCONA if chunk == 'A' else self.INTCONB
return self._get_bit_register(address, pin_num+1)
def set_bank(self, value):
"""Set bank bit.
        It changes the register mapping. Currently it only sets it to 0.
Args:
value: Int represents the value.
"""
self._set_bit_register(self.IOCON, 8, value)
self._set_registers(value)
def get_bank(self):
"""Get bank bit.
        It changes the register mapping. Currently it only sets it to 0.
Returns:
Int represents the value.
"""
return self._get_bit_register(self.IOCON, 8)
def set_mirror(self, value):
"""Set mirror bit.
If it is set the INTn pins are functionally OR'ed.
Args:
value: Int represents the value.
"""
self._set_bit_register(self.IOCON, 7, value)
def get_mirror(self):
"""Get mirror bit.
If it is set the INTn pins are functionally OR'ed.
Returns:
Int represents the value.
"""
return self._get_bit_register(self.IOCON, 7)
def set_seqop(self, value):
"""Set SEQOP bit.
        It changes the sequential operation. It is useful for polling.
Args:
value: Int represents the value.
"""
self._set_bit_register(self.IOCON, 6, value)
def get_seqop(self):
"""Get SEQOP bit.
        It changes the sequential operation. It is useful for polling.
Returns:
Int represents the value.
"""
return self._get_bit_register(self.IOCON, 6)
def set_disslw(self, value):
"""Set DISSLW bit.
It controls the slew rate of SDA pin
Args:
value: Int represents the value.
"""
self._set_bit_register(self.IOCON, 5, value)
def get_disslw(self):
"""Get DISSLW bit.
It controls the slew rate of SDA pin
Returns:
Int represents the value.
"""
return self._get_bit_register(self.IOCON, 5)
def set_haen(self, value):
"""It is usefull only in the mcp23s17."""
#"""Set HAEN bit.
#If it set the hardware address is controlled from A2 A1 A0. I
#Args:
# value: Int represents the value.
#"""
#self._set_bit_register(self.IOCON, 4, value)
pass
def set_odr(self, value):
"""Set ODR bit.
It enables the int pin for open drain configuration. It overrides the
INTPOL bit.
Args:
value: Int represents the value.
"""
self._set_bit_register(self.IOCON, 3, value)
def get_odr(self):
"""Get ODR bit.
It enables the int pin for open drain configuration. It overrides the
INTPOL bit.
Returns:
Int represents the value.
"""
return self._get_bit_register(self.IOCON, 3)
def set_intpol(self, value):
"""Set INTPOL bit.
It sets the polarity of the INT pin.
Args:
value: Int represents the value.
"""
self._set_bit_register(self.IOCON, 2, value)
def get_intpol(self):
"""Get INTPOL bit.
It sets the polarity of the INT pin.
Returns:
Int represents the value.
"""
return self._get_bit_register(self.IOCON, 2)
def set_pin_pull_up(self, pin_num, pull):
"""Set the pull up of a pin.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
pull (boolean): It could be 0 for down and 1 for up.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.GPPUA if chunk == 'A' else self.GPPUB
self._set_bit_register(address, pin_num+1, int(pull))
def get_pin_pull_up(self, pin_num):
"""Get the pull up of a pin.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
Int indicating the pin pull up resistor could be 0 for down and
1 for up.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.GPPUA if chunk == 'A' else self.GPPUB
return self._get_bit_register(address, pin_num+1)
def get_intf(self, pin_num):
"""Get the pin interrupt flag.
It reflects if the pin caused the interrupt.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
The flag value.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.INTFA if chunk == 'A' else self.INTFB
return self._get_bit_register(address, pin_num+1)
def get_mult_intf(self, pin_num):
"""Get the pin interrupt flag with more bytes
It reflects if the pin caused the interrupt.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
A list with the flag value.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.INTFA if chunk == 'A' else self.INTFB
data = self._read_sequential(address, 32)
print(data)
data = [self._get_bit(register, pin_num+1) for register in data]
return data
def get_intcap(self, pin_num):
"""Get the pin's state when the interrupt occured.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
The flag value.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.INTCAPA if chunk == 'A' else self.INTCAPB
return self._get_bit_register(address, pin_num+1)
def read(self, pin_num):
"""Read the pins state.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
Returns:
The pin's state.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.GPIOA if chunk == 'A' else self.GPIOB
return self._get_bit_register(address, pin_num+1)
def write(self, pin_num, value):
"""Write to the pin
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
value: Int could be 0 or 1.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.GPIOA if chunk == 'A' else self.GPIOB
self._set_bit_register(address, pin_num+1, value)
def read_olat(self, pin_num):
"""Read the olat register.
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.OLATA if chunk == 'A' else self.OLATB
return self._get_bit_register(address, pin_num+1)
def write_olat(self, pin_num, value):
"""Write to the pin olat
Args:
pin_num (str): The pin number in format A_x or B_x, where A/B is
the pin-chunk and x is the number. See modules's datasheet.
value: Int could be 0 or 1.
"""
chunk, pin_num = self._get_chunk_number(pin_num)
        address = self.OLATA if chunk == 'A' else self.OLATB
self._set_bit_register(address, pin_num+1, value)
def _read_interface(self, address):
"""Wrapper to interface read function."""
pass
def _write_interface(self, address, value):
"""Wrapper to interface write function."""
pass
def _set_bit_register(self, address, bit, value):
"""Set i'th bit in from register in address.
Args:
address:
bit:
value:
"""
register = self._read_interface(address)
register = self._set_bit(register, bit, value)
self._write_interface(address, register)
def _get_bit_register(self, address, bit):
"""Get i'th bit in from register in address.
Args:
address:
bit:
Returns:
The i'th bit from register.
"""
register = self._read_interface(address)
return self._get_bit(register, bit)
def _set_bit(self, register, bit, value, res=8):
"""Set value for specific bit in register in 8bit registers.
Args:
register: The 8 bit value.
bit: The i'th bit to be changed. It should be 1 to res.
value: 0 or 1.
res: The bit resolution of the register
Returns:
The new value of register.
"""
        if bit < 1 or bit > res:
            raise ValueError("bit must be in the range [1, res]")
max_val = 2**res - 1
mask = ((max_val << bit) | ((0x1 << (bit-1)) - 1)) & max_val
register &= mask
return register | (value << (bit-1))
def _get_bit(self, register, bit):
"""Get the value of a specific bit from register.
Args:
register: The value
            bit: The i'th bit to be read. The value should be between 1 and the register width.
Returns:
The value of the i'th bit of register.
"""
        if bit < 1:
            raise ValueError("bit must be greater than or equal to 1")
bit -= 1
return (register & (0x1 << bit)) >> bit
|
process_example.py
|
from multiprocessing import Process, Value, Array, Lock
import time
def add_100(numbers, lock):
for i in range(100):
time.sleep(0.01)
# with lock: # no other processes can access this part of code, lock as context manager
# number.value += 1
for i in range(len(numbers)):
with lock:
numbers[i] += 1
if __name__ == '__main__':
lock = Lock()
# shared_number = Value("i", 0)
shared_array = Array("d", [0.0, 100.0, 200.0]) # d: double
# print("Number at the beginning is", shared_number.value)
print("Array at the beginning is", shared_array[:])
p1 = Process(target=add_100, args=(shared_array, lock))
p2 = Process(target=add_100, args=(shared_array, lock))
p1.start()
p2.start()
p1.join()
p2.join()
# print("Number at the end is", shared_number.value)
print("Array at the end is", shared_array[:])
|
client.py
|
#!/bin/python
# coding=utf-8
import threading
import sys
from communication.protocol import *
reload(sys)
sys.setdefaultencoding('utf-8')
def _receive_message(client_socket):
while True:
_, receive_message = recv_data(client_socket)
print receive_message
host_ip = get_ip()
host_port = get_port()
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect((host_ip, host_port))
except Exception as e:
print e
exit(1)
receive_t = threading.Thread(target=_receive_message, args=(client,))
# quit with main program
receive_t.setDaemon(True)
receive_t.start()
while True:
data = raw_input("> ")
ret = send_data(client, data)
if ret == -1:
continue
if str.strip(data) in (EXIT_STR, EXIT_ALL_STR):
client.close()
exit(0)
|
_creating_thread.py
|
#creating a thread without using a class
from threading import*
# def display():
# for i in range(1, 11):
# print('child Thread')
# t = Thread(target = display) #creating thread object
# t.start()
# for i in range(1, 11):
# print('main thread')
#creating a thread by extending class
# from threading import*
# class MyThread(Thread):
# def run(self):
# for i in range(10):
# print('child thread-1')
# t = MyThread()
# t.start()
# for i in range(10):
# print('main Thread-1')
from threading import*
class Test:
def display(self):
for i in range(10):
print('child thread-2')
obj = Test()
t = Thread(target = obj.display)
t.start()
for i in range(10):
print('main Thread-2')
|
ssr_check.py
|
#!/usr/bin/env python3
import requests
import time
import threading
from ssshare.ss import ss_local
import random
def test_connection(
url='http://cip.cc',
headers={'User-Agent': 'curl/7.21.3 (i686-pc-linux-gnu) ' 'libcurl/7.21.3 OpenSSL/0.9.8o zlib/1.2.3.4 libidn/1.18'},
proxies=None, port=1080, timeout=10):
if not proxies:
proxies = {'http': 'socks5://localhost:{}'.format(port), 'https': 'socks5://localhost:{}'.format(port)}
ok = False
content = ''
try:
respond = requests.get(url, headers=headers, proxies=proxies, timeout=timeout)
ok = respond.ok
content = respond.text
except Exception as e:
print(e)
return ok, content
def test_socks_server(dictionary=None, str_json=None, port=None):
if not port:
port = random.randint(2000, 3000)
try:
try:
loop, tcps, udps = ss_local.main(
dictionary=dictionary, str_json=str_json, port=port)
except Exception as e:
print(e)
return -1, 'SSR start failed'
try:
t = threading.Thread(target=loop.run)
t.start()
time.sleep(3)
conn, content = test_connection(port=port)
loop.stop()
t.join()
tcps.close(next_tick=True)
udps.close(next_tick=True)
time.sleep(1)
return conn, content
except Exception as e:
print(e)
return -2, 'Thread or Connection to website failed'
except SystemExit as e:
return e.code - 10, 'Unknown failure'
def validate(websites):
for servers in websites:
print(servers['info'])
for server in servers['data']:
result, info = test_socks_server(str_json=server['json'])
            print('>' * 10, 'Result:', result)
if result is True:
                print('>' * 10, 'Test passed!')
elif result == -1:
print(server['json'])
server['status'] = result
server['content'] = info
return websites
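# A minimal sketch of the 'websites' structure that validate() expects, inferred from the loop
# above (the field values below are hypothetical placeholders, not real servers):
#
#     websites = [
#         {
#             'info': 'example source site',
#             'data': [
#                 {'json': '{"server": "0.0.0.0", "server_port": 8388, "...": "..."}'},
#             ],
#         },
#     ]
#     websites = validate(websites)  # adds 'status' and 'content' to each server entry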
if __name__ == '__main__':
print(test_connection())
|
ModbusTCP.py
|
#!/usr/bin/env python3
# Copyright (c) 2017 Dennis Mellican
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from SungrowModbusTcpClient import SungrowModbusTcpClient
from pymodbus.payload import BinaryPayloadDecoder
from pymodbus.client.sync import ModbusTcpClient
from pymodbus.constants import Endian
from importlib import import_module
from threading import Thread
import paho.mqtt.client as mqtt
import datetime
import logging
import getopt
import json
import time
import sys
import re
MIN_SIGNED = -2147483648
MAX_UNSIGNED = 4294967295
##Load options
options = {}
full_cmd_arguments = sys.argv
argument_list = full_cmd_arguments[1:]
short_options = 'i:p:m:M:o:U:P:l:s:t:'  # every option takes a value, so each letter needs a trailing ':'
long_options = ['ip=', 'port=', 'model=', 'mqtt_host=', 'mqtt_port=',
'mqtt_user=', 'mqtt_pass=', 'log_level=', 'scan=', 'timeout=']
try:
arguments, values = getopt.getopt(
argument_list, short_options, long_options)
except getopt.error as e:
raise ValueError('Invalid parameters!')
for current_argument, current_value in arguments:
if current_value == 'null' or len(current_value) == 0 or current_value.isspace():
pass
elif current_argument in ("-i", "--ip"):
options['inverter_ip'] = current_value
elif current_argument in ("-p", "--port"):
options['inverter_port'] = current_value
elif current_argument in ("-m", "--model"):
options['model'] = current_value
elif current_argument in ("-M", "--mqtt_host"):
options['mqtt_host'] = current_value
elif current_argument in ("-o", "--mqtt_port"):
options['mqtt_port'] = int(current_value)
elif current_argument in ("-U", "--mqtt_user"):
options['mqtt_user'] = current_value
elif current_argument in ("-P", "--mqtt_pass"):
options['mqtt_pass'] = current_value
elif current_argument in ("-l", "--log_level"):
options['log_level'] = current_value
elif current_argument in ("-s", "--scan"):
options['scan_interval'] = int(current_value)
elif current_argument in ("-t", "--timeout"):
options['timeout'] = int(current_value)
if options['log_level'] == 'WARNING':
log_level = logging.WARNING
elif options['log_level'] == 'INFO':
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
if "sungrow-" in options['model']:
options['slave'] = 0x01
else:
options['slave'] = 3
# SMA datatypes and their register lengths
# S = Signed Number, U = Unsigned Number, STR = String
sma_moddatatype = {
"S16": 1,
"U16": 1,
"S32": 2,
"U32": 2,
"U64": 4,
"STR16": 8,
"STR32": 16,
}
# Load the modbus register map for the inverter
modmap_file = f"modbus-{options['model']}"
try:
modmap = import_module(modmap_file)
except ModuleNotFoundError:
logging.error(f"Unable to locate {modmap_file}.py")
sys.exit(1)
# This will try the Sungrow client otherwise will default to the standard library.
client_payload = {
"host": options['inverter_ip'],
"timeout": options['timeout'],
"RetryOnEmpty": True,
"retries": 3,
"port": options['inverter_port'],
}
if "sungrow-" in options['model']:
logging.info(f"Create SungrowModbusTcpClient. Model: {options['model']}")
client = SungrowModbusTcpClient.SungrowModbusTcpClient(**client_payload)
else:
logging.info(f"Create ModbusTcpClient. Model: {options['model']}")
client = ModbusTcpClient(**client_payload)
client.connect()
client.close()
logging.info("Modbus connected")
# Configure MQTT
mqtt_client = mqtt.Client("ModbusTCP")
mqtt_client.username_pw_set(options['mqtt_user'], options['mqtt_pass'])
if options['mqtt_port'] == 8883:
mqtt_client.tls_set()
mqtt_client.connect(options['mqtt_host'], port=options['mqtt_port'])
logging.info("Configured MQTT Client")
# Inverter Scanning
inverter = {}
bus = json.loads(modmap.scan)
def load_registers(register_type, start, count=100):
try:
if register_type == "read":
rr = client.read_input_registers(
int(start),
count=count,
unit=options['slave'],
)
elif register_type == "holding":
rr = client.read_holding_registers(
int(start),
count=count,
unit=options['slave'],
)
else:
raise RuntimeError(f"Unsupported register type: {type}")
except Exception as err:
logging.warning("No data. Try increasing the timeout or scan interval.")
return False
if rr.isError():
logging.warning("Modbus connection failed")
return False
if not hasattr(rr, 'registers'):
logging.warning("No registers returned")
return
if len(rr.registers) != count:
logging.warning(f"Mismatched number of registers read {len(rr.registers)} != {count}")
return
overflow_regex = re.compile(r"(?P<register_name>[a-zA-Z0-9_\.]+)_overflow$")
divide_regex = re.compile(r"(?P<register_name>[a-zA-Z0-9_]+)_(?P<divide_by>[0-9\.]+)$")
for num in range(0, count):
run = int(start) + num + 1
if register_type == "read" and modmap.read_register.get(str(run)):
register_name = modmap.read_register.get(str(run))
register_value = rr.registers[num]
# Check if the modbus map has an '_overflow' on the end
# If so the value 'could' be negative (65535 - x) where (-x) is the actual number
# So a value of '64486' actually represents '-1049'
            # We rely on a second '_indicator' register to tell us if it's actually negative or not, otherwise it's ambiguous!
should_overflow = overflow_regex.match(register_name)
if should_overflow:
register_name = should_overflow["register_name"]
# Find the indicator register value
indicator_name = f"{register_name}_indicator"
for reg_num, reg_name in modmap.read_register.items():
if reg_name == indicator_name:
indicator_register = int(reg_num)
break
else:
indicator_register = None
if indicator_register is not None:
# Given register '5084' and knowing start of '5000' we can assume the index
# Of our indicator value is 5084 - 5000 - 1 (because of the 'off by 1')
indicator_value = rr.registers[indicator_register - int(start) - 1]
if indicator_value == 65535:
# We are in overflow
register_value = -1 * (65535 - register_value)
# Check if the modbus map has an '_10' or '_100' etc on the end
# If so, we divide by that and drop it from the name
should_divide = divide_regex.match(register_name)
if should_divide:
register_name = should_divide["register_name"]
register_value = float(register_value) / float(should_divide["divide_by"])
# Set the final register name and value, any adjustments above included
inverter[register_name] = register_value
elif register_type == "holding" and modmap.holding_register.get(str(run)):
register_name = modmap.holding_register.get(str(run))
register_value = rr.registers[num]
inverter[register_name] = register_value
return True
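# Worked example of the register-name conventions handled above (the map entries are
# illustrative, not taken from a real modbus-<model>.py file):
#   - "export_power_overflow" with a matching "export_power_indicator" register: when the
#     indicator reads 65535, a raw value of 64486 is stored as -(65535 - 64486) = -1049.
#   - "internal_temp_10": the trailing "_10" means the raw value is divided by 10 and the
#     suffix is dropped, so a raw 312 is stored as inverter["internal_temp"] = 31.2.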
# Function for polling data from the target and triggering writing to log file if set
def load_sma_register(registers):
# Request each register from datasets, omit first row which contains only column headers
for thisrow in registers:
name = thisrow[0]
startPos = thisrow[1]
type = thisrow[2]
format = thisrow[3]
        # If the connection is somehow not possible (e.g. target not responding)
        # log an error message instead of raising an exception and stopping
try:
received = client.read_input_registers(
address=startPos,
count=sma_moddatatype[type],
unit=options['slave']
)
except Exception:
thisdate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
logging.error(f"{thisdate}: Connection not possible, check settings or connection")
return
message = BinaryPayloadDecoder.fromRegisters(received.registers, endian=Endian.Big)
# Provide the correct result depending on the defined datatype
if type == "S32":
interpreted = message.decode_32bit_int()
elif type == "U32":
interpreted = message.decode_32bit_uint()
elif type == "U64":
interpreted = message.decode_64bit_uint()
elif type == "STR16":
interpreted = message.decode_string(16)
elif type == "STR32":
interpreted = message.decode_string(32)
elif type == "S16":
interpreted = message.decode_16bit_int()
elif type == "U16":
interpreted = message.decode_16bit_uint()
else:
# If no data type is defined do raw interpretation of the delivered data
interpreted = message.decode_16bit_uint()
# Check for "None" data before doing anything else
if ((interpreted == MIN_SIGNED) or (interpreted == MAX_UNSIGNED)):
displaydata = None
else:
# Put the data with correct formatting into the data table
if format == "FIX3":
displaydata = float(interpreted) / 1000
elif format == "FIX2":
displaydata = float(interpreted) / 100
elif format == "FIX1":
displaydata = float(interpreted) / 10
else:
displaydata = interpreted
logging.debug(f"************** {name} = {displaydata}")
inverter[name] = displaydata
# Add timestamp
inverter["Timestamp"] = datetime.datetime.now().strftime("%Y-%mm-%dd %H:%M:%S")
def publish_mqtt_discovery(inverter):
mqtt_client.reconnect()
logging.info("Publish Home Assistant Discovery message")
SENSOR_TOPIC = 'inverter_{}/tele/SENSOR'.format(options['model'])
DISCOVERY_TOPIC = 'homeassistant/sensor/inverter{}/{}/config'.format(options['model'], "{}") # energy/power
if "sungrow-" in options['model']:
manufacturer = 'Sungrow'
else:
manufacturer = 'SMA'
DISCOVERY_PAYLOAD = '{{"name": "Inverter {}", "uniq_id":"{}","stat_t": "{}", "json_attr_t": "{}", "unit_of_meas": "{}","dev_cla": "{}","state_class": "{}", "val_tpl": "{{{{ value_json.{} }}}}", "ic": "mdi:solar-power","device":{{ "name": "Solar Inverter","mf": "{}", "mdl": "{}", "connections":[["address", "{}" ]] }} }}'
energy_today_msg = DISCOVERY_PAYLOAD.format("Energy Today","inverter_energy_today", SENSOR_TOPIC, SENSOR_TOPIC, "kWh", "energy", "total_increasing", "daily_power_yield / 1000", manufacturer, options['model'], options['inverter_ip'])
energy_month_msg = DISCOVERY_PAYLOAD.format("Energy Monthly","inverter_energy_month", SENSOR_TOPIC, SENSOR_TOPIC, "kWh", "energy", "total_increasing", "monthly_power_yield / 1000", manufacturer, options['model'], options['inverter_ip'])
power_msg = DISCOVERY_PAYLOAD.format("Power", "inverter_power", SENSOR_TOPIC, SENSOR_TOPIC, "W", "power", "measurement","total_pv_power", manufacturer, options['model'], options['inverter_ip'], options['inverter_port'])
tempertature_msg = DISCOVERY_PAYLOAD.format("Temperature", "inverter_temperature", SENSOR_TOPIC, SENSOR_TOPIC, "°C", "temperature", "measurement","internal_temp", manufacturer, options['model'], options['inverter_ip'], options['inverter_port'])
daily_energy_consumpt_msg = DISCOVERY_PAYLOAD.format("Daily Energy Consumption", "inverter_energy_consumpt_daily", SENSOR_TOPIC, SENSOR_TOPIC, "kWh", "energy", "total_increasing","daily_energy_consumption / 1000", manufacturer, options['model'], options['inverter_ip'], options['inverter_port'])
daily_energy_import_msg = DISCOVERY_PAYLOAD.format("Daily Energy Import", "inverter_energy_import_daily", SENSOR_TOPIC, SENSOR_TOPIC, "kWh", "energy", "total_increasing","daily_purchased_energy / 1000", manufacturer, options['model'], options['inverter_ip'], options['inverter_port'])
result = mqtt_client.publish(DISCOVERY_TOPIC.format("energy_today"), energy_today_msg)
result = mqtt_client.publish(DISCOVERY_TOPIC.format("energy_monthly"), energy_month_msg)
result = mqtt_client.publish(DISCOVERY_TOPIC.format("power"), power_msg)
result = mqtt_client.publish(DISCOVERY_TOPIC.format("temperature"), tempertature_msg)
result = mqtt_client.publish(DISCOVERY_TOPIC.format("daily_energy_consumpt"), daily_energy_consumpt_msg)
result = mqtt_client.publish(DISCOVERY_TOPIC.format("daily_energy_import"), daily_energy_import_msg)
result.wait_for_publish()
def publish_mqtt(inverter):
# After a while you'll need to reconnect, so just reconnect before each publish
mqtt_client.reconnect()
SENSOR_TOPIC = 'inverter_{}/tele/SENSOR'.format(options['model'])
    result = mqtt_client.publish(SENSOR_TOPIC, json.dumps(inverter))  # json.dumps already yields valid JSON; the former .replace('"', '\"') was a no-op
result.wait_for_publish()
if result.rc != mqtt.MQTT_ERR_SUCCESS:
# See https://github.com/eclipse/paho.mqtt.python/blob/master/src/paho/mqtt/client.py#L149 for error code mapping
logging.error(f"Failed to publish to MQTT with error code: {result.rc}")
else:
logging.info("Published to MQTT")
return result
# Core monitoring loop
def scrape_inverter():
""" Connect to the inverter and scrape the metrics """
client.connect()
if "sungrow-" in options['model']:
for i in bus["read"]:
if not load_registers("read", i["start"], int(i["range"])):
return False
for i in bus["holding"]:
if not load_registers("holding", i["start"], int(i["range"])):
return False
# Sungrow inverter specifics:
# Work out if the grid power is being imported or exported
if options['model'] == "sungrow-sh5k":
try:
if inverter["grid_import_or_export"] == 65535:
export_power = (65535 - inverter["export_power"]) * -1
inverter["export_power"] = export_power
except Exception:
pass
try:
inverter["timestamp"] = "%s-%02d-%02dT%s:%02d:%02d" % (
inverter["year"],
inverter["month"],
inverter["day"],
inverter["hour"],
inverter["minute"],
inverter["second"],
)
del inverter["year"]
del inverter["month"]
del inverter["day"]
del inverter["hour"]
del inverter["minute"]
del inverter["second"]
except Exception:
pass
elif "sma-" in options['model']:
load_sma_register(modmap.sma_registers)
else:
raise RuntimeError(f"Unsupported inverter model detected: {options['model']}")
client.close()
logging.info(inverter)
return True
#Publish once
publish_mqtt_discovery(inverter)
while True:
# Scrape the inverter
success = scrape_inverter()
if not success:
logging.warning("Failed to scrape inverter, sleeping until next scan")
time.sleep(options['scan_interval'])
continue
# Optionally publish the metrics if enabled
if mqtt_client is not None:
t = Thread(target=publish_mqtt, args=(inverter,))
t.start()
# Sleep until the next scan
time.sleep(options['scan_interval'])
|
mobile_insight_gui.py
|
#!/usr/bin/python3
"""
Python GUI for MobileInsight
Author: Moustafa Alzantot
Date : Feb 26, 2016
"""
import sys
import wx
import wx.grid
import wx.adv
from threading import Thread
from random import random
from datetime import datetime, timedelta
import matplotlib
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
# from matplotlib.backends.backend_wx import NavigationToolbar2Wx, wxc
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import xml.dom.minidom
import xml.etree.ElementTree as ET
from mobile_insight.analyzer import LogAnalyzer
from mobile_insight.monitor.dm_collector.dm_endec.dm_log_packet import DMLogPacket
ID_FILE_OPEN = wx.NewId()
ID_FILE_EXIT = wx.NewId()
ID_TB_OPEN = wx.NewId()
ID_TB_FILTER = wx.NewId()
ID_TB_SEARCH = wx.NewId()
ID_TB_TIME = wx.NewId()
ID_TB_RESET = wx.NewId()
ID_TB_GRAPH = wx.NewId()
EVT_RESULT_ID = wx.NewId()
def EVT_RESULT(win, func):
win.Connect(-1, -1, EVT_RESULT_ID, func)
class ResultEvent(wx.PyEvent):
def __init__(self, data):
wx.PyEvent.__init__(self)
self.SetEventType(EVT_RESULT_ID)
self.data = data
class ProgressDialog(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1, style=wx.NO_BORDER)
mainSizer = wx.BoxSizer(wx.VERTICAL)
anim = wx.adv.Animation("icons/loading.gif")
gif = wx.adv.AnimationCtrl(self, -1, anim, size=(-1, -1))
# gif = wx.adv.AnimationCtrl(self, pos=(0, 0), size=(-1, -1))
gif.Play()
mainSizer.Add(gif, wx.EXPAND | wx.ALL)
self.SetSizer(mainSizer)
self.Fit()
class TimeWindowDialog(wx.Dialog):
def __init__(self, parent, start_time, end_time):
wx.Dialog.__init__(self, parent, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
self.SetTitle("Time Window")
self.start_label = wx.StaticText(self, -1, label="...", style=wx.BOLD)
self.end_label = wx.StaticText(self, -1, label="...", style=wx.BOLD)
self.window_label = wx.StaticText(self, -1, "\t to \t")
# self.start_label.SetFont(wx.Font(11, wx.DEFAULT, wx.BOLD, wx.NORMAL))
# self.window_label.SetFont(wx.Font(11, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
# self.end_label.SetFont(wx.Font(11, wx.DEFAULT, wx.BOLD, wx.NORMAL))
self.start_label.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_SLANT, wx.FONTWEIGHT_NORMAL))
self.window_label.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_ITALIC, wx.FONTWEIGHT_NORMAL))
self.end_label.SetFont(wx.Font(11, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_SLANT, wx.FONTWEIGHT_NORMAL))
labelSizer = wx.BoxSizer(wx.HORIZONTAL)
labelSizer.Add(self.start_label, 0, wx.ALL | wx.EXPAND, 3)
labelSizer.Add(self.window_label, wx.ALL, 1)
labelSizer.Add(self.end_label, 0, wx.ALL | wx.EXPAND, 3)
self.btns = self.CreateSeparatedButtonSizer(wx.OK | wx.CANCEL)
start_sizer = wx.BoxSizer(wx.HORIZONTAL)
start_sizer.Add(wx.StaticText(self, -1, "Start: "), 0, wx.ALL, 1)
self.start_slider = wx.Slider(
self, -1, 0, 0, 100, wx.DefaultPosition, (250, -1), wx.SL_HORIZONTAL)
start_sizer.Add(self.start_slider, 0, wx.ALL | wx.EXPAND, 5)
self.Bind(wx.EVT_SLIDER, self.start_slider_update, self.start_slider)
end_sizer = wx.BoxSizer(wx.HORIZONTAL)
end_sizer.Add(wx.StaticText(self, -1, "End: "), 0, wx.ALL, 1)
self.end_slider = wx.Slider(
self, -1, 100, 0, 100, wx.DefaultPosition, (250, -1), wx.SL_HORIZONTAL)
end_sizer.Add(self.end_slider, 0, wx.ALL | wx.EXPAND, 5)
        self.Bind(wx.EVT_SLIDER, self.end_slider_update, self.end_slider)
self.start_time = start_time
self.cur_end = end_time
self.cur_start = self.start_time
self.unit_seconds = (end_time - start_time).total_seconds() / 100.0
self.updateUI()
sizer.Add(labelSizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(start_sizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(end_sizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(self.btns, 0, wx.ALL | wx.EXPAND, 5)
self.SetSizer(sizer)
self.Fit()
def start_slider_update(self, event):
delta_seconds = self.start_slider.GetValue() * self.unit_seconds
self.cur_start = self.start_time + \
timedelta(seconds=int(delta_seconds))
self.updateUI()
    def end_slider_update(self, event):
delta_seconds = self.end_slider.GetValue() * self.unit_seconds
self.cur_end = self.start_time + timedelta(seconds=int(delta_seconds))
self.updateUI()
def updateUI(self):
self.start_label.SetLabel(format("%s" % (self.cur_start)))
self.end_label.SetLabel(format("%s" % (self.cur_end)))
class MyMCD(wx.Dialog):
def __init__(self, parent, message, caption, choices=[]):
wx.Dialog.__init__(self, parent, -1)
self.SetTitle(caption)
sizer = wx.BoxSizer(wx.VERTICAL)
self.message = wx.StaticText(self, -1, message)
self.clb = wx.CheckListBox(self, -1, choices=choices)
self.chbox = wx.CheckBox(self, -1, 'Select all')
self.btns = self.CreateSeparatedButtonSizer(wx.OK | wx.CANCEL)
self.Bind(wx.EVT_CHECKBOX, self.EvtChBox, self.chbox)
sizer.Add(self.message, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(self.clb, 1, wx.ALL | wx.EXPAND, 5)
sizer.Add(self.chbox, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(self.btns, 0, wx.ALL | wx.EXPAND, 5)
self.SetSizer(sizer)
# self.Fit()
def GetSelections(self):
return self.clb.GetChecked()
def EvtChBox(self, event):
state = self.chbox.IsChecked()
for i in range(self.clb.GetCount()):
self.clb.Check(i, state)
class WindowClass(wx.Frame):
def __init__(self, *args, **kwargs):
super(WindowClass, self).__init__(*args, **kwargs)
self.min_time = datetime.strptime("3000 Jan 1", '%Y %b %d')
self.max_time = datetime.strptime("1900 Jan 1", '%Y %b %d')
self.selectedTypes = None # Message Filters
self.basicGUI()
def basicGUI(self):
self._log_analyzer = LogAnalyzer(self.OnReadComplete)
menuBar = wx.MenuBar()
fileButton = wx.Menu()
editButton = wx.Menu()
openItem = fileButton.Append(ID_FILE_OPEN, "Open", "Open log file")
exitItem = fileButton.Append(ID_FILE_EXIT, "Exit", "Exit application")
menuBar.Append(fileButton, 'File')
menuBar.Append(editButton, "Edit")
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.Quit, exitItem)
self.Bind(wx.EVT_MENU, self.Open, openItem)
# Toolbar
self.toolbar = self.CreateToolBar(
wx.TB_FLAT | wx.TB_TEXT | wx.TB_HORIZONTAL | wx.NO_BORDER)
toolbar_open = self.toolbar.AddLabelTool(
ID_TB_OPEN, "Open", wx.Bitmap("/usr/local/share/mobileinsight/icons/open.png"))
self.toolbar.AddSeparator()
toolbar_filter = self.toolbar.AddLabelTool(
ID_TB_FILTER, "Filter", wx.Bitmap("/usr/local/share/mobileinsight/icons/filter.png"))
self.toolbar.AddSeparator()
toolbar_search = self.toolbar.AddLabelTool(
ID_TB_SEARCH, "Search", wx.Bitmap("/usr/local/share/mobileinsight/icons/search.png"))
self.toolbar.AddSeparator()
toolbar_time = self.toolbar.AddLabelTool(
ID_TB_TIME, "Time Window", wx.Bitmap("/usr/local/share/mobileinsight/icons/time.png"))
self.toolbar.AddSeparator()
toolbar_reset = self.toolbar.AddLabelTool(
ID_TB_RESET, "Reset", wx.Bitmap("/usr/local/share/mobileinsight/icons/reset.png"))
# self.toolbar.AddSeparator()
# toolbar_graph = self.toolbar.AddLabelTool(ID_TB_GRAPH, "Graph", wx.Bitmap("/usr/local/share/mobileinsight/icons/graph.png"))
self.toolbar.AddSeparator()
toolbar_about = self.toolbar.AddLabelTool(
ID_TB_GRAPH, "About", wx.Bitmap("/usr/local/share/mobileinsight/icons/about.png"))
self.Bind(wx.EVT_TOOL, self.Open, toolbar_open)
self.Bind(wx.EVT_TOOL, self.OnFilter, toolbar_filter)
self.Bind(wx.EVT_TOOL, self.OnSearch, toolbar_search)
self.Bind(wx.EVT_TOOL, self.OnTime, toolbar_time)
self.Bind(wx.EVT_TOOL, self.OnReset, toolbar_reset)
# self.Bind(wx.EVT_TOOL, self.OnGraph, toolbar_graph)
self.Bind(wx.EVT_TOOL, self.OnAbout, toolbar_about)
self.toolbar.Realize()
# Main Panel
panel = wx.Panel(self, -1, size=(-1, -1), style=wx.BORDER_RAISED)
mainSizer = wx.BoxSizer(wx.HORIZONTAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.grid = wx.grid.Grid(self)
self.grid.CreateGrid(50, 2)
self.grid.SetSelectionMode(1) # 1 is Select Row
self.grid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.OnGridSelect)
self.grid.SetColLabelValue(0, "Timestamp")
self.grid.SetColLabelValue(1, "Type ID")
hbox.Add(self.grid, 5, wx.EXPAND | wx.ALL, 10)
leftPanel = wx.Panel(self, -1, size=(-1, -1), style=wx.BORDER_RAISED)
leftbox = wx.BoxSizer(wx.VERTICAL)
self.status_text = wx.StaticText(
leftPanel,
label="Welcome to MobileInsight 6.0 beta!\n\nMobileInsight is a Python 3 package for mobile network monitoring and analysis on the end device.",
style=wx.ALIGN_LEFT)
#self.details_text = wx.TextCtrl(leftPanel, style=wx.ALIGN_LEFT | wx.TE_MULTILINE)
self.details_text = wx.TreeCtrl(leftPanel, style=wx.TR_DEFAULT_STYLE | wx.TR_LINES_AT_ROOT)
leftbox.Add(self.status_text, 1, wx.EXPAND | wx.HORIZONTAL)
leftbox.Add(self.details_text, 3, wx.EXPAND)
leftPanel.SetSizer(leftbox)
hbox.Add(leftPanel, 4, wx.EXPAND | wx.ALL, 10)
self.grid.SetColSize(0, 200)
self.grid.SetColSize(1, 300)
self.grid.ForceRefresh()
panel.SetSizer(hbox)
mainSizer.Add(panel, 1, wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.statusbar = self.CreateStatusBar()
self.Bind(wx.EVT_CLOSE, self.Quit)
self.SetTitle("MobileInsight")
self.SetSize((1200, 800))
self.Centre()
self.Show(True)
self.data = None
EVT_RESULT(self, self.OnResult)
def OnResult(self, event):
if self.progressDialog:
self.progressDialog.EndModal(wx.ID_CANCEL)
self.progressDialog.Destroy()
data = event.data
if data:
self.statusbar.SetStatusText("Read %d logs" % len(data))
self.data = data
self.data_view = self.data
self.SetupGrid()
def Open(self, e):
openFileDialog = wx.FileDialog(
self,
"Open Log file",
"",
"",
"log files (*.mi2log) |*.mi2log| All files |*.*",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_MULTIPLE)
if (openFileDialog.ShowModal() == wx.ID_OK):
# print 'Selected %s' %openFileDialog.GetPath()
print(('Selected %s' % openFileDialog.Paths))
try:
self.grid.ClearGrid()
# thread.start_new_thread(openFile,(openFileDialog.GetPath(),))
# t = Thread(target = self.openFile, args=(openFileDialog.GetPath(),self.selectedTypes))
t = Thread(
target=self.openFile,
args=(
openFileDialog.Paths,
self.selectedTypes))
self.progressDialog = ProgressDialog(self)
t.start()
self.progressDialog.ShowModal()
if len(openFileDialog.Paths) == 1:
self.SetTitle(openFileDialog.GetPath())
else:
self.SetTitle(
"Multiple files in " +
openFileDialog.Directory)
            except Exception as e:
                print("Error while opening file.", e)
# if (random() > 0.5):
# self.SetupGrid(self.data1)
# else:
# self.SetupGrid(self.data2)
def OnFilter(self, e):
types = list(self._log_analyzer.supported_types)
checkboxDialog = MyMCD(self, "Filter", "", types)
if (checkboxDialog.ShowModal() == wx.ID_OK):
self.selectedTypes = [types[x]
for x in checkboxDialog.GetSelections()]
if self.data:
self.data_view = [
x for x in self.data if x["TypeID"] in self.selectedTypes]
self.SetupGrid()
def OnTime(self, e):
timewindowDialog = TimeWindowDialog(self, self.min_time, self.max_time)
if (timewindowDialog.ShowModal() == wx.ID_OK):
select_start = timewindowDialog.cur_start
select_end = timewindowDialog.cur_end
self.data_view = [
x for x in self.data_view if datetime.strptime(
x["Timestamp"],
'%Y-%m-%d %H:%M:%S.%f') >= select_start and datetime.strptime(
x["Timestamp"],
'%Y-%m-%d %H:%M:%S.%f') <= select_end]
self.SetupGrid()
def OnReset(self, e):
if self.data:
self.data_view = self.data
self.SetupGrid()
# def openFile(self, filePath,selectedTypes):
# self._log_analyzer.AnalyzeFile(filePath,selectedTypes)
def openFile(self, Paths, selectedTypes):
self._log_analyzer.AnalyzeFile(Paths, selectedTypes)
def OnSearch(self, e):
search_dlg = wx.TextEntryDialog(
self, "Search for", "", "", style=wx.OK | wx.CANCEL)
if (search_dlg.ShowModal() == wx.ID_OK):
keyword = search_dlg.GetValue()
self.data_view = [
x for x in self.data_view if keyword in x["Payload"]]
self.SetupGrid()
search_dlg.Destroy()
def OnAbout(self, e):
about_text = (
'MobileInsight GUI\n\n\n' +
'Copyright (c) 2014-2016 MobileInsight Team\n\n' +
'Developers:\n Moustafa Alzantot,\n' +
' Priyanka Avinash Kachare,\n' +
' Michael Ivan,\n' +
' Yuanjie Li')
search_dlg = wx.MessageDialog(
self, about_text, "About MobileInsight GUI", wx.OK)
search_dlg.ShowModal()
def OnGridSelect(self, e):
# self.statusbar.SetStatusText("Selected %d" %e.GetRow())
row = e.GetRow()
if (row < len(self.data_view)):
self.status_text.SetLabel(
"Time Stamp : %s Type : %s" %
(str(
self.data_view[row]["Timestamp"]), str(
self.data_view[row]["TypeID"])))
#self.details_text.SetValue(str(self.data_view[row]["Payload"]))
#val = xml.dom.minidom.parseString(
# str(self.data_view[row]["Payload"]))
#pretty_xml_as_string = val.toprettyxml(indent=" ", newl="\n", encoding="utf8") # maybe will trigger bug
#self.details_text.SetValue(pretty_xml_as_string)
self.content = {}
r = ET.fromstring(str(self.data_view[row]["Payload"]))
print(r.tag)
for child in r:
k = child.get("key")
if child.get("type")=="list" and len(child)==0:
self.content[k]={}
elif child.get("type") == "list" and child[0].tag == "list":
list_content = self.parse_list(child, k)
self.content[k] = list_content
elif child.get("type") == "list" and child[0].tag == "msg": # xml from wireshark
list_content = self.parse_msg(child)
self.content[k] = list_content
# print(str(list_content))
elif child.get("type")=="dict":
self.content[k]=self.parse_dict(child)
else:
self.content[k] = child.text
self.details_text.DeleteAllItems()
root = self.details_text.AddRoot('payload')
self.creat_tree(self.content,root)
self.details_text.ExpandAll()
e.Skip()
def parse_list(self, listroot, attrib_key):
'''
convert list from .xml to standard dict
:param listroot:
:param attrib_key:
:return: dict
'''
list_content = {}
if(len(listroot)==0):
return None
listroot = listroot[0]; # <pair key="CA Combos" type="list"> <list>
i = 0
for xml_list in listroot:
if xml_list.tag == "item" and xml_list.get("type") == "dict": # The only subclass of list is dict
dist_content = self.parse_dict(xml_list)
if(xml_list.get("key")==None):
list_content[attrib_key + "[" + str(i) + "]"] = dist_content
i += 1
else:
list_content[xml_list.get("key")]=dist_content
return list_content
def parse_dict(self, dictroot):
'''
convert dict from .xml to standard dict
:param dictroot:
:return:
'''
dictroot = dictroot[0] # <item type="dict"> <dict>
dict_content = {}
for d in dictroot:
k = d.get("key")
if (d.get("type") == "list"): # list in dist
list_content = self.parse_list(d, k)
dict_content[k] = list_content
elif (d.get("type")=="dict"):
list_content = self.parse_dict(d)
dict_content[k] = list_content
else:
dict_content[k] = d.text; # key-value
return dict_content
def split_key_value(self,str):
'''
e.g. "a:b"->"a","b"
:param str:
:return:
'''
start=str.find(":")
if(start!=-1):
key=str[0:start]
val=str[start+1:]
return key ,val
else:
return str,"none"
def parse_msg(self, msgroot):
'''
parse xml file which is conveyed by wireshark
:param msgroot:
:return:
'''
proto = msgroot.findall(".//proto")
dict_msg = {}
skip_context=["geninfo","frame","user_dlt"]#proto which is useless
for p in proto:
if (p.get("hide") != "yes" and p.get("name") not in skip_context):
dict_msg.update(self.parse_msg_field(p))
else:
continue
return dict_msg
def parse_msg_field(self, msgroot):
msg_dict={}
#skip_context=["geninfo","frame","user_dlt"]
for field in msgroot:
if (field.get("hide") == "yes"):
continue
elif len(field) != 0 and field.get("showname")!=None:
k=field.get("showname")
k,_=self.split_key_value(k)
val_dict = self.parse_msg_field(field)
if len(val_dict)==0:
msg_dict[k]="skip"
else:
msg_dict[k]=val_dict
elif len(field)!=0:
msg_dict.update(self.parse_msg_field(field))
else:
dict_msg = field.get("showname")
if dict_msg !=None:
k, v = self.split_key_value(dict_msg)
msg_dict[k] = v
return msg_dict
def creat_tree(self,payload_dict,root):
for k,v in payload_dict.items():
if(isinstance(v,dict)):
subroot=self.details_text.AppendItem(root,str(k))
self.creat_tree(v,subroot)
else:
if(v!="skip"):
self. details_text.AppendItem(root,str(k)+":"+str(v))
else:
self.details_text.AppendItem(root,str(k))
def Quit(self, e):
self.Destroy()
def OnReadComplete(self):
evt = ResultEvent(self._log_analyzer.msg_logs)
wx.PostEvent(wx.GetApp().frame, evt)
def SetupGrid(self):
self.min_time = datetime.strptime("3000 Jan 1", '%Y %b %d')
self.max_time = datetime.strptime("1900 Jan 1", '%Y %b %d')
n = len(self.data_view)
# self.grid.CreateGrid(max(25, n), 2)
if n > self.grid.GetNumberRows():
self.grid.InsertRows(0, n - self.grid.GetNumberRows())
else:
self.grid.DeleteRows(0, self.grid.GetNumberRows() - n)
self.grid.ClearGrid()
self.grid.SetColLabelValue(0, "Timestamp")
self.grid.SetColLabelValue(1, "Type ID")
for i in range(n):
try:
cur_time = datetime.strptime(
self.data_view[i]["Timestamp"],
'%Y-%m-%d %H:%M:%S.%f')
except Exception as e:
cur_time = datetime.strptime(
self.data_view[i]["Timestamp"], '%Y-%m-%d %H:%M:%S')
self.min_time = min(self.min_time, cur_time)
self.max_time = max(self.max_time, cur_time)
self.grid.SetCellValue(i, 0, str(self.data_view[i]["Timestamp"]))
self.grid.SetCellValue(i, 1, str(self.data_view[i]["TypeID"]))
self.grid.SetReadOnly(i, 0)
self.grid.SetReadOnly(i, 1)
# self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.onRowClick)
def main():
wx.Log.SetLogLevel(0)
app = wx.App()
app.frame = WindowClass(None)
app.MainLoop()
main()
|
multi_launcher.py
|
# coding: utf-8
"""
This module contains methods for launching several Rockets in a parallel environment
"""
from __future__ import unicode_literals
from multiprocessing import Process
import os
import threading
import time
from fireworks.fw_config import FWData, PING_TIME_SECS, DS_PASSWORD
from fireworks.core.rocket_launcher import rapidfire
from fireworks.utilities.fw_utilities import DataServer
__author__ = 'Xiaohui Qu, Anubhav Jain'
__copyright__ = 'Copyright 2013, The Material Project & The Electrolyte Genome Project'
__version__ = '0.1'
__maintainer__ = 'Xiaohui Qu'
__email__ = 'xqu@lbl.gov'
__date__ = 'Aug 19, 2013'
def ping_multilaunch(port, stop_event):
"""
A single manager to ping all launches during multiprocess launches
:param port: (int) Listening port number of the DataServer
:param stop_event: (Thread.Event) stop event
"""
ds = DataServer(address=('127.0.0.1', port), authkey=DS_PASSWORD)
ds.connect()
lp = ds.LaunchPad()
while not stop_event.is_set():
for pid, lid in ds.Running_IDs().items():
if lid:
try:
os.kill(pid, 0) # throws OSError if the process is dead
lp.ping_launch(lid)
except OSError:
pass # means this process is dead!
stop_event.wait(PING_TIME_SECS)
def rapidfire_process(fworker, nlaunches, sleep, loglvl, port, node_list, sub_nproc):
"""
Initializes shared data with multiprocessing parameters and starts a rapidfire
:param fworker: (FWorker) object
:param nlaunches: (int) 0 means 'until completion', -1 or "infinite" means to loop forever
:param sleep: (int) secs to sleep between rapidfire loop iterations
:param loglvl: (str) level at which to output logs to stdout
:param port: (int) Listening port number of the shared object manage
:param node_list: ([str]) computer node list
:param sub_nproc: (int) number of processors of the sub job
"""
ds = DataServer(address=('127.0.0.1', port), authkey=DS_PASSWORD)
ds.connect()
launchpad = ds.LaunchPad()
FWData().DATASERVER = ds
FWData().MULTIPROCESSING = True
FWData().NODE_LIST = node_list
FWData().SUB_NPROCS = sub_nproc
rapidfire(launchpad, fworker, None, nlaunches, -1, sleep, loglvl)
def start_rockets(fworker, nlaunches, sleep, loglvl, port, node_lists, sub_nproc_list):
"""
Create each sub job and start a rocket launch in each one
:param fworker: (FWorker) object
:param nlaunches: nlaunches: (int) 0 means 'until completion', -1 or "infinite" means to loop forever
:param sleep: (int) secs to sleep between rapidfire loop iterations
:param loglvl: (str) level at which to output logs to stdout
:param port: (int) Listening port number
:param node_lists: ([str]) computer node list
:param sub_nproc_list: ([int]) list of the number of the process of sub jobs
:return: ([multiprocessing.Process]) all the created processes
"""
processes = [Process(target=rapidfire_process, args=(fworker, nlaunches, sleep, loglvl, port, nl, sub_nproc))
for nl, sub_nproc in zip(node_lists, sub_nproc_list)]
for p in processes:
p.start()
time.sleep(0.15)
return processes
def split_node_lists(num_jobs, total_node_list=None, ppn=24):
"""
Parse node list and processor list from nodefile contents
:param num_jobs: (int) number of sub jobs
:param total_node_list: (list of str) the node list of the whole large job
:param ppn: (int) number of procesors per node
:return: (([int],[int])) the node list and processor list for each job
"""
if total_node_list:
orig_node_list = sorted(list(set(total_node_list)))
nnodes = len(orig_node_list)
if nnodes%num_jobs != 0:
raise ValueError("can't allocate nodes, {} can't be divided by {}".format(nnodes, num_jobs))
        sub_nnodes = nnodes // num_jobs  # integer division: used as a range() step and slice index below
sub_nproc_list = [sub_nnodes * ppn] * num_jobs
node_lists = [orig_node_list[i:i+sub_nnodes] for i in range(0, nnodes, sub_nnodes)]
else:
sub_nproc_list = [ppn] * num_jobs
node_lists = [None] * num_jobs
return node_lists, sub_nproc_list
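# A worked example, assuming a hypothetical 4-node NODEFILE split into 2 sub jobs:
#
#     split_node_lists(2, ['n1', 'n2', 'n3', 'n4'], ppn=24)
#     # -> (node_lists=[['n1', 'n2'], ['n3', 'n4']], sub_nproc_list=[48, 48])
#
#     split_node_lists(2)  # no node list given, default ppn=24
#     # -> ([None, None], [24, 24])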
def launch_multiprocess(launchpad, fworker, loglvl, nlaunches, num_jobs, sleep_time,
total_node_list=None, ppn=1):
"""
Launch the jobs in the job packing mode.
:param launchpad: (LaunchPad) object
:param fworker: (FWorker) object
:param loglvl: (str) level at which to output logs
:param nlaunches: (int) 0 means 'until completion', -1 or "infinite" means to loop forever
:param num_jobs: (int) number of sub jobs
:param sleep_time: (int) secs to sleep between rapidfire loop iterations
:param total_node_list: ([str]) contents of NODEFILE (doesn't affect execution)
:param ppn: (int) processors per node (doesn't affect execution)
"""
# parse node file contents
node_lists, sub_nproc_list = split_node_lists(num_jobs, total_node_list, ppn)
# create shared dataserver
ds = DataServer.setup(launchpad)
port = ds.address[1]
# launch rapidfire processes
processes = start_rockets(fworker, nlaunches, sleep_time, loglvl, port, node_lists,
sub_nproc_list)
# start pinging service
ping_stop = threading.Event()
ping_thread = threading.Thread(target=ping_multilaunch, args=(port, ping_stop))
ping_thread.start()
# wait for completion
for p in processes:
p.join()
ping_stop.set()
ping_thread.join()
ds.shutdown()
|
terminal_pane.py
|
import wx
import threading
if not wx.GetApp():
app = wx.App()
ColourDatabase = wx.ColourDatabase()
FOREGROUND_COLOURS = {
30: wx.BLACK,
31: wx.RED,
    32: wx.GREEN,   # ANSI SGR 32 is green
33: wx.YELLOW,
34: wx.BLUE,
35: ColourDatabase.Find('MAGENTA'),
36: wx.CYAN,
37: wx.WHITE,
39: wx.GREEN
}
BACKGROUND_COLOURS = {
    40: wx.BLACK,   # ANSI SGR 40 is a black background
41: wx.RED,
42: wx.GREEN,
43: wx.YELLOW,
44: wx.BLUE,
45: ColourDatabase.Find('MAGENTA'),
46: wx.CYAN,
47: wx.WHITE,
49: wx.BLACK
}
class TerminalPane(wx.SplitterWindow):
def __init__(self, parent, serial):
wx.SplitterWindow.__init__(self, parent, -1, style=wx.SP_LIVE_UPDATE | wx.SP_3D)
self.serial = serial
self.text_ctrl1 = wx.TextCtrl(
self,
-1,
'',
            style=wx.TE_READONLY | wx.TE_MULTILINE | wx.TE_DONTWRAP | wx.TE_RICH
)
self.text_ctrl1.SetForegroundColour(wx.Colour(0, 255, 0))
self.text_ctrl1.SetBackgroundColour(wx.Colour(0, 0, 0))
self.text_ctrl2 = wx.TextCtrl(
self,
-1,
'',
style=wx.TE_MULTILINE | wx.TE_DONTWRAP | wx.TE_PROCESS_ENTER | wx.TE_PROCESS_TAB
)
self.text_ctrl2.SetForegroundColour(wx.Colour(0, 255, 0))
self.text_ctrl2.SetBackgroundColour(wx.Colour(0, 0, 0))
font = self.text_ctrl1.GetFont()
self.text_attr = wx.TextAttr()
self.text_attr.SetFont(font)
self.text_attr.SetTextColour(wx.GREEN)
self.text_attr.SetBackgroundColour(wx.BLACK)
self.SplitHorizontally(self.text_ctrl1, self.text_ctrl2)
self.SetSashGravity(0.75)
self.text_ctrl2.Bind(wx.EVT_TEXT_ENTER, self.on_enter)
self.serial_lock = threading.Lock()
self._exit_event = threading.Event()
self._thread = threading.Thread(target=self.serial_read_loop)
def AppendText(self, text):
print(repr(text))
text_attr = wx.TextAttr(self.text_attr)
text_len = len(self.text_ctrl1.GetValue())
self.text_ctrl1.SetInsertionPointEnd()
if '\x1b[' in text:
text = [item.split('m', 1) for item in text.split('\x1b[') if item]
print(text)
for item in text:
if len(item) == 1:
item = ['0', item[0]]
ansi_codes, chars = item
text_attr = wx.TextAttr(text_attr)
ansi_codes = [int(item) for item in ansi_codes.split(';')]
for code in ansi_codes:
if code == 0:
text_attr.SetFontStyle(wx.FONTSTYLE_NORMAL)
text_attr.SetFontWeight(wx.FONTWEIGHT_NORMAL)
text_attr.SetFontUnderlined(False)
text_attr.SetTextColour(wx.GREEN)
text_attr.SetBackgroundColour(wx.BLACK)
elif code == 3:
text_attr.SetFontStyle(wx.FONTSTYLE_ITALIC)
elif code == 23:
text_attr.SetFontStyle(wx.FONTSTYLE_NORMAL)
elif code == 4:
text_attr.SetFontUnderlined(True)
text_attr.SetFontUnderlineType(wx.TEXT_ATTR_UNDERLINE_SOLID)
elif code == 21:
text_attr.SetFontUnderlined(True)
text_attr.SetFontUnderlineType(wx.TEXT_ATTR_UNDERLINE_DOUBLE)
elif code == 24:
text_attr.SetFontUnderlined(False)
elif code == 1:
text_attr.SetFontWeight(wx.FONTWEIGHT_BOLD)
elif code == 22:
text_attr.SetFontWeight(wx.FONTWEIGHT_NORMAL)
elif code == 2:
text_attr.SetFontWeight(wx.FONTWEIGHT_EXTRALIGHT)
elif code in FOREGROUND_COLOURS:
text_attr.SetTextColour(FOREGROUND_COLOURS[code])
elif code in BACKGROUND_COLOURS:
text_attr.SetBackgroundColour(BACKGROUND_COLOURS[code])
print(text_attr.GetTextColour())
print(text_attr.GetBackgroundColour())
self.text_ctrl1.AppendText(chars)
self.text_ctrl1.SetStyle(text_len, text_len + len(chars), text_attr)
text_len += len(chars)
else:
self.text_ctrl1.AppendText(text)
self.text_ctrl1.SetStyle(text_len, text_len + len(text), text_attr)
self.text_attr = text_attr
self.text_ctrl1.SetInsertionPointEnd()
def read(self):
return self.serial.read_decoded()
def write(self, data):
return self.serial.communicate(data)
def __enter__(self):
self.serial_lock.acquire()
self.text_ctrl2.Enable(False)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.serial_lock.release()
self.text_ctrl2.Enable()
def serial_read_loop(self):
while not self._exit_event.is_set():
with self.serial_lock:
data = self.read()
if data:
def _do(dta):
self.AppendText(dta)
wx.CallAfter(_do, data)
def on_enter(self, evt):
if self.serial_lock.locked():
return
value = self.text_ctrl2.GetValue()
lines = value.split('\n')
last_line = lines[-1]
if (
not last_line.startswith(' ') and
not last_line.startswith('\t') and
not last_line.endswith('/') and
not last_line.endswith(':')
):
for o_brace, c_brace in (('(', ')'), ('[', ']'), ('{', '}')):
brace_count = value.count(o_brace) - value.count(c_brace)
if brace_count > 0:
evt.Skip()
break
else:
self.serial.write(bytes(value + '\r\n', encoding='utf-8'))
self.text_ctrl2.SetValue('')
else:
evt.Skip()
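# A minimal usage sketch, assuming a serial wrapper object that exposes the read_decoded()
# and communicate() methods this pane calls (the MySerial name and the wiring below are
# hypothetical, not part of this module):
#
#     frame = wx.Frame(None, title='Terminal')
#     pane = TerminalPane(frame, serial=MySerial('/dev/ttyUSB0', 115200))
#     pane._thread.start()  # the reader thread is created in __init__ but not started there
#     frame.Show()
#     app.MainLoop()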
|
utility.py
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
utility: Defines common utility functions and components.
"""
import ast
import base64
import isodate
import json
import os
import sys
import re
import hmac
import hashlib
import random
from threading import Event, Thread
from datetime import datetime
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
def parse_entity(entity, filter_none=False):
"""
Function creates a dict of object attributes.
Args:
entity (object): object to extract attributes from.
Returns:
result (dict): a dictionary of attributes from the function input.
"""
result = {}
attributes = [attr for attr in dir(entity) if not attr.startswith("_")]
for attribute in attributes:
value = getattr(entity, attribute, None)
if filter_none and not value:
continue
value_behavior = dir(value)
if "__call__" not in value_behavior:
result[attribute] = value
return result
def evaluate_literal(literal, expected):
"""
Function to provide safe evaluation of code literal.
Args:
literal (): code literal
expected (class, type, tuple): expected resulting class,
type or tuple of literal evaluation.
Returns:
result (string, number, tuple, list, dict, boolean, None).
"""
# Safe evaluation
try:
result = ast.literal_eval(literal)
if not isinstance(result, expected):
return None
return result
except Exception:
return None
def verify_transform(subject, mapping):
"""
Determines if a key from mapping exists in subject and if so
verifies that subject[k] is of type mapping[k]
"""
import jmespath
for k in mapping.keys():
result = jmespath.search(k, subject)
if result is None:
raise AttributeError('The property "{}" is required'.format(k))
if not isinstance(result, mapping[k]):
supplemental_info = ""
if mapping[k] == dict:
wiki_link = "https://github.com/Azure/azure-iot-cli-extension/wiki/Tips"
supplemental_info = "Review inline JSON examples here --> {}".format(
wiki_link
)
raise TypeError(
'The property "{}" must be of {} but is {}. Input: {}. {}'.format(
k, str(mapping[k]), str(type(result)), result, supplemental_info
)
)
def validate_key_value_pairs(string):
"""
    Function to validate key-value pairs in the format: a=b;c=d
Args:
string (str): semicolon delimited string of key/value pairs.
Returns (dict, None): a dictionary of key value pairs.
"""
result = None
if string:
kv_list = [x for x in string.split(";") if "=" in x] # key-value pairs
result = dict(x.split("=", 1) for x in kv_list)
return result
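# Example: validate_key_value_pairs("HostName=example.net;SharedAccessKeyName=owner")
# returns {'HostName': 'example.net', 'SharedAccessKeyName': 'owner'}; segments without
# an '=' are silently dropped.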
def process_json_arg(content, argument_name, preserve_order=False):
""" Primary processor of json input """
json_from_file = None
if os.path.exists(content):
json_from_file = content
content = read_file_content(content)
try:
return shell_safe_json_parse(content, preserve_order)
except CLIError as ex:
if looks_like_file(content):
logger.warning(
"The json payload for argument '%s' looks like its intended from a file. "
"Please ensure the file path is correct.",
argument_name,
)
file_content_error = "from file: '{}' ".format(json_from_file)
raise CLIError(
"Failed to parse json {}for argument '{}' with exception:\n {}".format(
file_content_error if json_from_file else "", argument_name, ex
)
)
def shell_safe_json_parse(json_or_dict_string, preserve_order=False):
""" Allows the passing of JSON or Python dictionary strings. This is needed because certain
JSON strings in CMD shell are not received in main's argv. This allows the user to specify
the alternative notation, which does not have this problem (but is technically not JSON). """
try:
if not preserve_order:
return json.loads(json_or_dict_string)
from collections import OrderedDict
return json.loads(json_or_dict_string, object_pairs_hook=OrderedDict)
except ValueError as json_ex:
try:
return ast.literal_eval(json_or_dict_string)
except SyntaxError:
raise CLIError(json_ex)
except ValueError as ex:
logger.debug(
ex
) # log the exception which could be a python dict parsing error.
raise CLIError(
json_ex
) # raise json_ex error which is more readable and likely.
def read_file_content(file_path, allow_binary=False):
from codecs import open as codecs_open
# Note, always put 'utf-8-sig' first, so that BOM in WinOS won't cause trouble.
for encoding in ["utf-8-sig", "utf-8", "utf-16", "utf-16le", "utf-16be"]:
try:
with codecs_open(file_path, encoding=encoding) as f:
logger.debug("Attempting to read file %s as %s", file_path, encoding)
return f.read()
except (UnicodeError, UnicodeDecodeError):
pass
if allow_binary:
try:
with open(file_path, "rb") as input_file:
logger.debug("Attempting to read file %s as binary", file_path)
return base64.b64encode(input_file.read()).decode("utf-8")
except Exception: # pylint: disable=broad-except
pass
raise CLIError("Failed to decode file {} - unknown decoding".format(file_path))
def trim_from_start(s, substring):
""" Trims a substring from the target string (if it exists) returning the trimmed string.
Otherwise returns original target string. """
if s.startswith(substring):
s = s[len(substring) :]
return s
def validate_min_python_version(major, minor, error_msg=None, exit_on_fail=True):
""" If python version does not match AT LEAST requested values, will throw non 0 exit code."""
version = sys.version_info
result = False
if version.major > major:
return True
if major == version.major:
result = version.minor >= minor
if not result:
if exit_on_fail:
msg = (
error_msg
if error_msg
else "Python version {}.{} or higher required for this functionality.".format(
major, minor
)
)
sys.exit(msg)
return result
def unicode_binary_map(target):
""" Decode binary keys and values of map to unicode."""
# Assumes no iteritems()
result = {}
for k in target:
key = k
if isinstance(k, bytes):
key = str(k, "utf8")
if isinstance(target[k], bytes):
result[key] = str(target[k], "utf8")
else:
result[key] = target[k]
return result
def execute_onthread(**kwargs):
"""
Experimental generic helper for executing methods without return values on a background thread
Args:
kwargs: Supported kwargs are 'interval' (int) to specify intervals between calls
'method' (func) to specify method pointer for execution
'args' (list) to specify method arguments
'max_runs' (int) indicate an upper bound on number of executions
'return_handle' (bool) indicates whether to return a Thread handle
Returns:
Event(): Object to set the event cancellation flag
or if 'return_handle'=True
Event(), Thread(): Event object to set the cancellation flag, Executing Thread object
"""
interval = kwargs.get("interval")
method = kwargs.get("method")
method_args = kwargs.get("args")
max_runs = kwargs.get("max_runs")
handle = kwargs.get("return_handle")
if not interval:
interval = 2
if not method:
raise ValueError('kwarg "method" required for execution')
if not method_args:
method_args = []
cancellation_token = Event()
def method_wrap(max_runs=None):
runs = 0
while not cancellation_token.wait(interval):
if max_runs:
if runs >= max_runs:
break
method(*method_args)
runs += 1
op = Thread(target=method_wrap, args=(max_runs,))
op.start()
if handle:
return cancellation_token, op
return cancellation_token
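# A minimal usage sketch for execute_onthread (heartbeat and its arguments are hypothetical
# examples, not part of this module):
#
#     def heartbeat(device_id):
#         logger.info("pinging %s", device_id)
#
#     cancel = execute_onthread(method=heartbeat, args=["my-device"], interval=5, max_runs=10)
#     cancel.set()  # stop the background loop
#
#     # or, to also join the worker thread:
#     cancel, worker = execute_onthread(method=heartbeat, args=["my-device"], return_handle=True)
#     cancel.set()
#     worker.join()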
def url_encode_dict(d):
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
return urlencode(d)
def url_encode_str(s, plus=False):
try:
if plus:
from urllib import quote_plus
else:
from urllib import quote
except ImportError:
if plus:
from urllib.parse import quote_plus
else:
from urllib.parse import quote
return quote_plus(s) if plus else quote(s)
def test_import(package):
""" Used to determine if a dependency is loading correctly """
import importlib
try:
importlib.import_module(package)
except ImportError:
return False
return True
def unpack_pnp_http_error(e):
error = unpack_msrest_error(e)
if isinstance(error, dict):
if error.get("error"):
error = error["error"]
if error.get("stackTrace"):
error.pop("stackTrace")
return error
def unpack_msrest_error(e):
""" Obtains full response text from an msrest error """
op_err = None
try:
op_err = json.loads(e.response.text)
except (ValueError, TypeError):
op_err = e.response.text
if not op_err:
return str(e)
return op_err
def dict_transform_lower_case_key(d):
""" Converts a dictionary to an identical one with all lower case keys """
return {k.lower(): v for k, v in d.items()}
def calculate_millisec_since_unix_epoch_utc():
now = datetime.utcnow()
epoch = datetime.utcfromtimestamp(0)
return int(1000 * (now - epoch).total_seconds())
def init_monitoring(cmd, timeout, properties, enqueued_time, repair, yes):
from azext_iot.common.deps import ensure_uamqp
from knack.util import CLIError
validate_min_python_version(3, 5)
if timeout < 0:
raise CLIError("Monitoring timeout must be 0 (inf) or greater.")
timeout = timeout * 1000
config = cmd.cli_ctx.config
output = cmd.cli_ctx.invocation.data.get("output", None)
if not output:
output = "json"
ensure_uamqp(config, yes, repair)
if not properties:
properties = []
properties = set((key.lower() for key in properties))
if not enqueued_time:
enqueued_time = calculate_millisec_since_unix_epoch_utc()
return (enqueued_time, properties, timeout, output)
def get_sas_token(target):
from azext_iot.common.digitaltwin_sas_token_auth import (
DigitalTwinSasTokenAuthentication,
)
token = ""
if target.get("repository_id"):
token = DigitalTwinSasTokenAuthentication(
target["repository_id"],
target["entity"],
target["policy"],
target["primarykey"],
).generate_sas_token()
return {"Authorization": "{}".format(token)}
def dict_clean(d):
""" Remove None from dictionary """
if not isinstance(d, dict):
return d
return dict((k, dict_clean(v)) for k, v in d.items() if v is not None)
def looks_like_file(element):
element = element.lower()
return element.endswith(
(
".log",
".rtf",
".txt",
".json",
".yaml",
".yml",
".md",
".rst",
".doc",
".docx",
".html",
".htm",
".py",
".java",
".ts",
".js",
".cs",
)
)
def ensure_pkg_resources_entries():
import pkg_resources
from azure.cli.core.extension import get_extension_path
from azext_iot.constants import EXTENSION_NAME
extension_path = get_extension_path(EXTENSION_NAME)
if extension_path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(extension_path)
return
class ISO8601Validator:
def is_iso8601_date(self, to_validate) -> bool:
try:
return bool(isodate.parse_date(to_validate))
except Exception:
return False
def is_iso8601_datetime(self, to_validate: str) -> bool:
try:
return bool(isodate.parse_datetime(to_validate))
except Exception:
return False
def is_iso8601_duration(self, to_validate: str) -> bool:
try:
return bool(isodate.parse_duration(to_validate))
except Exception:
return False
def is_iso8601_time(self, to_validate: str) -> bool:
try:
return bool(isodate.parse_time(to_validate))
except Exception:
return False
def ensure_min_version(cur_ver, min_ver):
from pkg_resources._vendor.packaging import version
return version.parse(cur_ver) >= version.parse(min_ver)
def scantree(path):
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
yield from scantree(entry.path)
else:
yield entry
def find_between(s, start, end):
return (s.split(start))[1].split(end)[0]
def valid_hostname(host_name):
"""
Approximate validation
Reference: https://en.wikipedia.org/wiki/Hostname
"""
if len(host_name) > 253:
return False
valid_label = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
label_parts = host_name.split(".")
return all(valid_label.match(label) for label in label_parts)
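# Examples: valid_hostname("my-hub.azure-devices.net") -> True, while names with empty or
# oversized labels ("bad..name", or any string longer than 253 characters) -> False.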
def compute_device_key(primary_key, registration_id):
"""
Compute device SAS key
Args:
primary_key: Primary group SAS token to compute device keys
registration_id: Registration ID is alphanumeric, lowercase, and may contain hyphens.
Returns:
device key
"""
secret = base64.b64decode(primary_key)
device_key = base64.b64encode(
hmac.new(
secret, msg=registration_id.encode("utf8"), digestmod=hashlib.sha256
).digest()
)
return device_key
def generate_key(byte_length=32):
key = ""
while byte_length > 0:
key += chr(random.randrange(1, 128))
byte_length -= 1
return base64.b64encode(key.encode()).decode("utf-8")
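# Note: generate_key() draws from random.randrange, which is not a cryptographically secure
# source. A hedged sketch of an equivalent built on the stdlib secrets module (not how this
# module currently does it):
#
#     import secrets
#     def generate_key_secure(byte_length=32):
#         return base64.b64encode(secrets.token_bytes(byte_length)).decode("utf-8")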
|
04-receive_data.py
|
#!/usr/bin/env python3
# Copyright (C) 2018 Simon Brummer <simon.brummer@posteo.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
import threading
from testrunner import run
from shared_func import TcpServer, generate_port_number, get_host_tap_device, \
get_host_ll_addr, get_riot_if_id, setup_internal_buffer, \
read_data_from_internal_buffer, verify_pktbuf_empty, \
sudo_guard
def tcp_server(port, shutdown_event, data):
with TcpServer(port, shutdown_event) as tcp_srv:
tcp_srv.send(data)
def testfunc(child):
port = generate_port_number()
shutdown_event = threading.Event()
# Try to receive 2000 bytes sent from the Host System.
data = '0123456789' * 200
data_len = len(data)
    # Verify that the RIOT application's internal buffer can hold the test data.
assert setup_internal_buffer(child) >= data_len
server_handle = threading.Thread(target=tcp_server, args=(port, shutdown_event, data))
server_handle.start()
target_addr = get_host_ll_addr(get_host_tap_device()) + '%' + get_riot_if_id(child)
    # Set up the RIOT node to connect to the host system's TCP server
child.sendline('gnrc_tcp_tcb_init')
child.sendline('gnrc_tcp_open_active [{}]:{} 0'.format(target_addr, str(port)))
child.expect_exact('gnrc_tcp_open_active: returns 0')
# Accept Data sent by the host system
child.sendline('gnrc_tcp_recv 1000000 ' + str(data_len))
child.expect_exact('gnrc_tcp_recv: received ' + str(data_len), timeout=20)
# Close connection and verify that pktbuf is cleared
shutdown_event.set()
child.sendline('gnrc_tcp_close')
server_handle.join()
verify_pktbuf_empty(child)
# Verify received Data
assert read_data_from_internal_buffer(child, data_len) == data
print(os.path.basename(sys.argv[0]) + ': success')
if __name__ == '__main__':
sudo_guard()
sys.exit(run(testfunc, timeout=5, echo=False, traceback=True))
|
test_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import queue
import time
import unittest
from test import support
threading = support.import_module('threading')
QUEUE_SIZE = 5
def qfull(q):
return q.maxsize > 0 and q.qsize() == q.maxsize
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
def tearDown(self):
self.t = None
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not self.t.startedEvent.is_set():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
class BaseQueueTestMixin(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
self.assertTrue(q.empty())
self.assertFalse(q.full())
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(q.qsize(), "Queue should not be empty")
self.assertTrue(not qfull(q), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), "Queue should be full")
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x < 0:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0,1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(-1) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_simple_queue(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(queue.Empty):
q.get_nowait()
def test_shrinking_queue(self):
# issue 10110
q = self.type2test(3)
q.put(1)
q.put(2)
q.put(3)
with self.assertRaises(queue.Full):
q.put_nowait(4)
self.assertEqual(q.qsize(), 3)
q.maxsize = 2 # shrink the queue
with self.assertRaises(queue.Full):
q.put_nowait(4)
class QueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.Queue
class LifoQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.LifoQueue
class PriorityQueueTest(BaseQueueTestMixin, unittest.TestCase):
type2test = queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
pass
class FailingQueue(queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException("You Lose")
return queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException("You Lose")
return queue.Queue._get(self)
class FailingQueueTest(BlockingTestMixin, unittest.TestCase):
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
q.get()
self.assertTrue(not qfull(q), "Queue should not be full")
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
def test_main():
support.run_unittest(QueueTest, LifoQueueTest, PriorityQueueTest,
FailingQueueTest)
if __name__ == "__main__":
test_main()
|
datasets.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This dataset module supports various formats of datasets, including ImageNet, TFData,
MNIST, Cifar10/100, Manifest, MindRecord, and more. This module loads data with
high performance and parses data precisely. Some of the operations that are
provided to users to preprocess data include shuffle, batch, repeat, map, and zip.
"""
import atexit
import glob
import json
import math
import os
import signal
import stat
import time
import uuid
import multiprocessing
from multiprocessing.pool import RUN
import queue
from enum import Enum
from functools import partial
from importlib import import_module
import sys
import threading
import copy
import weakref
import platform
import psutil
import numpy as np
from scipy.io import loadmat
from PIL import Image
import mindspore._c_dataengine as cde
from mindspore._c_expression import typing
from mindspore.common import Tensor
from mindspore import log as logger
from mindspore.parallel._ps_context import _is_role_pserver, _is_role_sched
from mindspore.parallel._utils import _get_device_num
import mindspore.dataset.transforms.py_transforms as py_transforms
from . import samplers
from .iterators import DictIterator, TupleIterator, DummyIterator, check_iterator_cleanup, _set_iterator_cleanup, \
ITERATORS_LIST, _unset_iterator_cleanup
from .queue import _SharedQueue
from .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \
check_rename, check_numpyslicesdataset, check_device_send, check_take, check_project, check_imagefolderdataset, \
check_mnist_cifar_dataset, check_manifestdataset, check_tfrecorddataset, check_vocdataset, check_cocodataset, \
check_celebadataset, check_minddataset, check_generatordataset, check_sync_wait, check_zip_dataset, \
check_add_column, check_textfiledataset, check_concat, check_random_dataset, check_split, \
check_bucket_batch_by_length, check_cluedataset, check_save, check_csvdataset, check_paddeddataset, \
check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_flickr_dataset, \
check_sb_dataset, check_flowers102dataset, check_cityscapes_dataset, check_usps_dataset, check_div2k_dataset, \
check_sbu_dataset, check_qmnist_dataset, check_emnist_dataset, check_fake_image_dataset, check_places365_dataset, \
check_photo_tour_dataset
from ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers, \
get_prefetch_size
from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist
from ..core.validator_helpers import replace_none
from ..core.py_util_helpers import ExceptionHandler
from ..transforms.py_transforms_util import FuncWrapper
try:
context = import_module("mindspore.context")
except ModuleNotFoundError:
context = None
class Shuffle(str, Enum):
GLOBAL: str = "global"
FILES: str = "files"
INFILE: str = "infile"
ShuffleToShuffleMode = {Shuffle.FILES: cde.ShuffleMode.FILES,
Shuffle.GLOBAL: cde.ShuffleMode.GLOBAL,
Shuffle.INFILE: cde.ShuffleMode.INFILE}
def shuffle_to_shuffle_mode(shuffle):
"""
Convert a Shuffle enum value to the C-layer shuffle mode.
Args:
shuffle (Shuffle): shuffle flag to be converted to a C-layer shuffle mode.
Returns:
ShuffleMode, the corresponding shuffle mode.
"""
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
else:
shuffle_mode = cde.ShuffleMode.FALSE # No shuffle
else:
shuffle_mode = ShuffleToShuffleMode[shuffle]
return shuffle_mode
def shuffle_to_bool(shuffle):
"""
Convert a Shuffle enum value to a bool.
Args:
shuffle (Shuffle): shuffle flag to be converted to bool.
Returns:
bool or None, the corresponding boolean shuffle flag.
"""
shuffle_bool = True
if not isinstance(shuffle, Shuffle):
if shuffle is None:
shuffle_bool = None
elif shuffle:
shuffle_bool = True
else:
shuffle_bool = False
else:
shuffle_bool = True
return shuffle_bool
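# Illustrative sketch (not part of the original module) of how the two helpers above
# map the user-facing shuffle argument, derived directly from their code paths:
#
#   shuffle_to_shuffle_mode(True)           -> cde.ShuffleMode.GLOBAL
#   shuffle_to_shuffle_mode(False)          -> cde.ShuffleMode.FALSE
#   shuffle_to_shuffle_mode(None)           -> cde.ShuffleMode.GLOBAL
#   shuffle_to_shuffle_mode(Shuffle.FILES)  -> cde.ShuffleMode.FILES
#   shuffle_to_bool(None)                   -> None
#   shuffle_to_bool(Shuffle.INFILE)         -> True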
@check_zip
def zip(datasets):
"""
Zip the datasets in the input tuple of datasets.
Args:
datasets (tuple of class Dataset): A tuple of datasets to be zipped together.
The number of datasets must be more than 1.
Returns:
ZipDataset, dataset zipped.
Raises:
ValueError: If the number of datasets is 1.
TypeError: If datasets is not a tuple.
Examples:
>>> # Create a dataset which is the combination of dataset_1 and dataset_2
>>> dataset = ds.zip((dataset_1, dataset_2))
"""
if len(datasets) <= 1:
raise ValueError(
"Can't zip empty or just one dataset!")
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
return ZipDataset(datasets)
def _get_operator_process():
"""
Internal method, mainly used to pass sub-process ids to the C layer.
Returns:
tuple, a dict mapping each operator id to its process ids, and a bool indicating whether all process ids were fetched.
"""
global _OP_PROCESS
process_info = _OP_PROCESS
op_process = dict()
keys = process_info.keys()
fetched_all = True
for key in keys:
op_process[key] = list(process_info[key][1])
item_full = (len(process_info[key][1]) == process_info[key][0])
fetched_all = fetched_all and item_full
return op_process, fetched_all
def _set_dataset_permissions(file_name, num_files):
"""
Set the saved dataset files' permissions to 600.
The dataset filename rule must match the one used in the C++ layer.
"""
num_digits = len(str(num_files - 1))
if num_files == 1:
paths = [file_name]
else:
paths = ["{}{}".format(file_name, str(x).rjust(num_digits, '0')) for x in range(num_files)]
for item in paths:
if os.path.exists(item):
os.chmod(item, stat.S_IRUSR | stat.S_IWUSR)
index_file = item + ".db"
if os.path.exists(index_file):
os.chmod(index_file, stat.S_IRUSR | stat.S_IWUSR)
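# Illustrative sketch (not part of the original module): with the filename rule above,
# _set_dataset_permissions("out.mindrecord", 3) would chmod the hypothetical shard files
# "out.mindrecord0", "out.mindrecord1" and "out.mindrecord2" (plus any ".db" index files)
# to 600, while num_files=1 leaves the name unchanged as "out.mindrecord".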
class Dataset:
"""
Abstract class to represent a dataset in DataEngine's data pipeline.
This class is the base class of SourceDataset and Dataset, and represents
a node in the data flow graph.
Args:
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel
(default=None).
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
# Note: children and parent are internal variables, not recommended for external use.
self.children = replace_none(children, [])
if isinstance(self.children, tuple):
self.children = list(self.children)
if not isinstance(self.children, list):
self.children = [self.children]
self.parent = []
for child in self.children:
child.parent.append(weakref.ref(self))
self.num_parallel_workers = num_parallel_workers
self.cache = cache
self._device_iter = 0
self._input_indexs = ()
self.saved_output_types = None
self.saved_output_shapes = None
self.dynamic_setting = [False, None]
self.saved_min_shapes = None
self.saved_max_shapes = None
self._col_names = None
self.dataset_size = None
self._batch_size = None
self._num_classes = None
self._repeat_count = None
self._class_indexing = None
self._sync = False
def create_ir_tree(self):
"""
Internal method to build an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
Dataset, the root dataset of the IR tree.
"""
parent = self.parent
self.parent = []
dataset = copy.deepcopy(self)
global _OP_NAME
_OP_NAME = Dataset._get_operator_id(dataset)
ir_tree = dataset.parse_tree()
self.parent = parent
_init_device_info()
return ir_tree, dataset
def close_pool(self):
"""
Close the multiprocessing pool in this dataset. If you are familiar with the multiprocessing library, you can
regard this as the destructor for a processing pool object.
"""
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
for child in self.children:
child.close_pool()
def notify_watchdog(self):
"""
Close the watchdog thread in this dataset. GeneratorDataset/map/batch use a thread named watch_dog to monitor
their worker processes; for get_dataset_size/output_shapes/output_types/get_col_name/num_classes, we need
notify_watchdog to close the watch_dog thread manually.
"""
if hasattr(self, 'sample_fn') and self.sample_fn is not None:
if self.sample_fn.multi_process:
self.sample_fn._abort_watchdog() # pylint: disable=W0212
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
for child in self.children:
child.notify_watchdog()
@staticmethod
def _get_operator_id(dataset):
"""
Internal method to iterate the tree and obtain op_id of each operator.
Returns:
dict, mapping from each operator's string representation to its op_id.
"""
op_name = dict()
generator_process = dict()
op_name[str(dataset)] = 0
op_id = 1
def process_name(datasets, operator_id):
if not datasets:
return 0
temp = []
for item in datasets:
for d in item.children:
temp.append(d)
op_name[str(d)] = operator_id
if isinstance(d, GeneratorDataset) and d.sample_fn and d.sample_fn.pids:
generator_process[operator_id] = [d.num_parallel_workers, set(d.sample_fn.pids)]
operator_id = operator_id + 1
return process_name(temp, operator_id)
process_name([dataset], op_id)
if generator_process:
global _OP_PROCESS
_OP_PROCESS.update(generator_process)
return op_name
def parse_tree(self):
"""
Internal method to parse the API tree into an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
"""
if len(self.parent) > 1:
raise ValueError("The data pipeline is not a tree (i.e., one node has 2 consumers)")
ir_children = [d.parse_tree() for d in self.children]
# Bootstrap can only be performed on a copy of the original dataset node.
# Bootstrap on original dataset node will make all iterators share the same process pool
self.iterator_bootstrap()
ir_node = self.parse(ir_children)
ir_node = self.post_parse(ir_node)
return ir_node
def __safe_deepcopy__(self, memodict, exclude=()):
if id(self) in memodict:
return memodict[id(self)]
cls = self.__class__
new_op = cls.__new__(cls)
memodict[id(self)] = new_op
for arg, value in self.__dict__.items():
if arg in exclude:
setattr(new_op, arg, value)
else:
try:
setattr(new_op, arg, copy.deepcopy(value, memodict))
except TypeError:
setattr(new_op, arg, value)
return new_op
def iterator_bootstrap(self):
pass
@staticmethod
def _noop_mode():
if _is_role_sched() or _is_role_pserver():
return True
return False
def __add__(self, datasets):
return self.concat(datasets)
def to_json(self, filename=""):
"""
Serialize the pipeline into a JSON string and dump it into a file if filename is provided.
Args:
filename (str): filename of the JSON file to be saved as.
Returns:
dict, the serialized pipeline parsed back from its JSON string.
"""
ir_tree, _ = self.create_ir_tree()
return json.loads(ir_tree.to_json(filename))
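# Hedged usage sketch for to_json() (illustrative, not from the original docstring;
# "pipeline.json" is a hypothetical filename):
#
#   pipeline = dataset.to_json()                # serialized pipeline, parsed into a dict
#   pipeline = dataset.to_json("pipeline.json") # additionally dumps the JSON to a file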
@check_bucket_batch_by_length
def bucket_batch_by_length(self, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function=None,
pad_info=None, pad_to_bucket_boundary=False, drop_remainder=False):
"""
Bucket elements according to their lengths. Each bucket will be padded and batched when
it is full.
A length function is called on each row in the dataset. The row is then
bucketed based on its length and the bucket boundaries. When a bucket reaches its
corresponding size specified in bucket_batch_sizes, the entire bucket will be
padded according to pad_info and then batched.
Each batch will be full, except for one special case: the last batch for each bucket may not be full.
Args:
column_names (list[str]): Columns passed to element_length_function.
bucket_boundaries (list[int]): A list consisting of the upper boundaries
of the buckets. Must be strictly increasing. If there are n boundaries,
n+1 buckets are created: One bucket for [0, bucket_boundaries[0]), one
bucket for [bucket_boundaries[i], bucket_boundaries[i+1]) for each
0<i<n-1, and last bucket for [bucket_boundaries[n-1], inf).
bucket_batch_sizes (list[int]): A list consisting of the batch sizes for
each bucket. Must contain len(bucket_boundaries)+1 elements.
element_length_function (Callable, optional): A function that takes in
M arguments where M = len(column_names) and returns an integer. If no value is
provided, M (that is, len(column_names)) must be 1, and the size of the first
dimension of that column will be taken as the length (default=None).
pad_info (dict, optional): The information about how to batch each column. The key
corresponds to the column name, and the value must be a tuple of 2 elements.
The first element corresponds to the shape to pad to, and the second
element corresponds to the value to pad with. If a column is not
specified, then that column will be padded to the longest in the current
batch, and 0 will be used as the padding value. Any None dimensions will
be padded to the longest in the current batch, unless
pad_to_bucket_boundary is True. If no padding is wanted, set pad_info
to None (default=None).
pad_to_bucket_boundary (bool, optional): If True, will pad each None
dimension in pad_info to the bucket_boundary minus 1. If there are any
elements that fall into the last bucket, an error will occur
(default=False).
drop_remainder (bool, optional): If True, will drop the last batch for each
bucket if it is not a full batch (default=False).
Returns:
BucketBatchByLengthDataset, dataset bucketed and batched by length.
Examples:
>>> # Create a dataset where a certain number of rows are combined into a batch
>>> # and drops the last incomplete batch if there is one.
>>> import numpy as np
>>> def generate_2_columns(n):
... for i in range(n):
... yield (np.array([i]), np.array([j for j in range(i + 1)]))
>>>
>>> column_names = ["col1", "col2"]
>>> dataset = ds.GeneratorDataset(generate_2_columns(8), column_names)
>>> bucket_boundaries = [5, 10]
>>> bucket_batch_sizes = [2, 1, 1]
>>> element_length_function = (lambda col1, col2: max(len(col1), len(col2)))
>>> # Will pad col2 to shape [bucket_boundaries[i]] where i is the
>>> # index of the bucket that is currently being batched.
>>> pad_info = {"col2": ([None], -1)}
>>> pad_to_bucket_boundary = True
>>> dataset = dataset.bucket_batch_by_length(column_names, bucket_boundaries,
... bucket_batch_sizes,
... element_length_function, pad_info,
... pad_to_bucket_boundary)
"""
return BucketBatchByLengthDataset(self, column_names, bucket_boundaries, bucket_batch_sizes,
element_length_function, pad_info, pad_to_bucket_boundary, drop_remainder)
@check_batch
def batch(self, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
"""
Combine batch_size number of consecutive rows into batches.
For any child node, a batch is treated as a single row.
For any column, all the elements within that column must have the same shape.
If a per_batch_map callable is provided, it will be applied to the batches of tensors.
Note:
The order of using repeat and batch affects the number of batches and the behavior of per_batch_map.
It is recommended that the repeat operation be applied after the batch operation has finished.
Args:
batch_size (int or function): The number of rows each batch is created with. An
int or callable object which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last block
whose number of rows is less than the batch size (default=False). If True, and if there are fewer
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers(threads) to process the dataset in parallel
(default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch
of Tensors on a given column. The number of lists should match with number of entries in input_columns.
The last parameter of the callable should always be a BatchInfo object. Per_batch_map should return
(list[Tensor], list[Tensor], ...). The length of each list in output should be same as the input.
output_columns is required if the number of output lists is different from input.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list
should match with signature of per_batch_map callable (default=None).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns
outputted by the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
would pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0
(default=None).
python_multiprocessing (bool, optional): Parallelize Python function per_batch_map with multi-processing.
This option could be beneficial if the function is computational heavy (default=False).
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
Returns:
BatchDataset, dataset batched.
Examples:
>>> # Create a dataset where every 100 rows are combined into a batch
>>> # and drops the last incomplete batch if there is one.
>>> dataset = dataset.batch(100, True)
>>> # resize image according to its batch number, if it's 5-th batch, resize to (5^2, 5^2) = (25, 25)
>>> def np_resize(col, batchInfo):
... output = col.copy()
... s = (batchInfo.get_batch_num() + 1) ** 2
... index = 0
... for c in col:
... img = Image.fromarray(c.astype('uint8')).convert('RGB')
... img = img.resize((s, s), Image.ANTIALIAS)
... output[index] = np.array(img)
... index += 1
... return (output,)
>>> dataset = dataset.batch(batch_size=8, input_columns=["image"], per_batch_map=np_resize)
"""
return BatchDataset(self, batch_size, drop_remainder, num_parallel_workers, per_batch_map, input_columns,
output_columns, column_order, pad_info, python_multiprocessing, max_rowsize)
@check_sync_wait
def sync_wait(self, condition_name, num_batch=1, callback=None):
"""
Add a blocking condition to the input Dataset. A synchronize action will be applied.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (int): the number of batches without blocking at the start of each epoch.
callback (function): The callback function that will be invoked when sync_update is called.
Returns:
SyncWaitDataset, dataset added a blocking condition.
Raises:
RuntimeError: If condition name already exists.
Examples:
>>> import numpy as np
>>> def gen():
... for i in range(100):
... yield (np.array(i),)
>>>
>>> class Augment:
... def __init__(self, loss):
... self.loss = loss
...
... def preprocess(self, input_):
... return input_
...
... def update(self, data):
... self.loss = data["loss"]
>>>
>>> batch_size = 4
>>> dataset = ds.GeneratorDataset(gen, column_names=["input"])
>>>
>>> aug = Augment(0)
>>> dataset = dataset.sync_wait(condition_name="policy", callback=aug.update)
>>> dataset = dataset.map(operations=[aug.preprocess], input_columns=["input"])
>>> dataset = dataset.batch(batch_size)
>>> count = 0
>>> for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
... assert data["input"][0] == count
... count += batch_size
... data = {"loss": count}
... dataset.sync_update(condition_name="policy", data=data)
"""
return SyncWaitDataset(self, condition_name, num_batch, callback)
@check_shuffle
def shuffle(self, buffer_size):
"""
Randomly shuffles the rows of this dataset using the following policy:
1. Make a shuffle buffer that contains the first buffer_size rows.
2. Randomly select an element from the shuffle buffer to be the next row
propagated to the child node.
3. Get the next row (if any) from the parent node and put it in the shuffle buffer.
4. Repeat steps 2 and 3 until there are no more rows left in the shuffle buffer.
A random seed can be provided to be used on the first epoch. In every subsequent
epoch, the seed is changed to a new, randomly generated value.
Args:
buffer_size (int): The size of the buffer (must be larger than 1) for
shuffling. Setting buffer_size equal to the number of rows in the entire
dataset will result in a global shuffle.
Returns:
ShuffleDataset, dataset shuffled.
Raises:
RuntimeError: If there are sync operators before shuffle.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Optionally set the seed for the first epoch
>>> ds.config.set_seed(58)
>>> # Create a shuffled dataset using a shuffle buffer of size 4
>>> dataset = dataset.shuffle(4)
"""
return ShuffleDataset(self, buffer_size)
def flat_map(self, func):
"""
Map `func` to each row in dataset and flatten the result.
The specified `func` is a function that must take one 'Ndarray' as input
and return a 'Dataset'.
Args:
func (function): A function that must take one 'Ndarray' as an argument and
return a 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # use NumpySlicesDataset as an example
>>> dataset = ds.NumpySlicesDataset([[0, 1], [2, 3]])
>>>
>>> def flat_map_func(array):
... # create a NumpySlicesDataset with the array
... dataset = ds.NumpySlicesDataset(array)
... # repeat the dataset twice
... dataset = dataset.repeat(2)
... return dataset
>>>
>>> dataset = dataset.flat_map(flat_map_func)
>>> # [[0, 1], [0, 1], [2, 3], [2, 3]]
Raises:
TypeError: If `func` is not a function.
TypeError: If `func` doesn't return a Dataset.
"""
dataset = None
if not hasattr(func, '__call__'):
logger.critical("func must be a function.")
raise TypeError("func must be a function.")
for row_data in self.create_tuple_iterator(output_numpy=True):
if dataset is None:
dataset = func(row_data)
else:
dataset += func(row_data)
if not isinstance(dataset, Dataset):
logger.critical("flat_map must return a Dataset object.")
raise TypeError("flat_map must return a Dataset object.")
return dataset
@check_map
def map(self, operations, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None, max_rowsize=16):
"""
Apply each operation in operations to this dataset.
The order of operations is determined by the position of each operation in the operations parameter.
operations[0] will be applied first, then operations[1], then operations[2], etc.
Each operation will be passed one or more columns from the dataset as input, and zero or
more columns will be outputted. The first operation will be passed the columns specified
in input_columns as input. If there is more than one operator in operations, the outputted
columns of the previous operation are used as the input columns for the next operation.
The columns outputted by the very last operation will be assigned names specified by
output_columns.
Only the columns specified in column_order will be propagated to the child node. These
columns will be in the same order as specified in column_order.
Args:
operations (Union[list[TensorOp], list[functions]]): List of operations to be
applied on the dataset. Operations are applied in the order they appear in this list.
input_columns (Union[str, list[str]], optional): List of the names of the columns that will be passed to
the first operation as input. The size of this list must match the number of
input columns expected by the first operator. (default=None, the first
operation will be passed however many columns that are required, starting from
the first column).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of threads used to process the dataset in
parallel (default=None, the value from the configuration will be used).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computational heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
Returns:
MapDataset, dataset after mapping operation.
Examples:
>>> # dataset is an instance of Dataset which has 2 columns, "image" and "label".
>>>
>>> # Define two operations, where each operation accepts 1 input column and outputs 1 column.
>>> decode_op = c_vision.Decode(rgb=True)
>>> random_jitter_op = c_vision.RandomColorAdjust(brightness=(0.8, 0.8), contrast=(1, 1),
... saturation=(1, 1), hue=(0, 0))
>>>
>>> # 1) Simple map example.
>>>
>>> # Apply decode_op on column "image". This column will be replaced by the outputted
>>> # column of decode_op. Since column_order is not provided, both columns "image"
>>> # and "label" will be propagated to the child node in their original order.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"])
>>>
>>> # Decode and rename column "image" to "decoded_image".
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"], output_columns=["decoded_image"])
>>>
>>> # Specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=None, column_order=["label", "image"])
>>>
>>> # Rename column "image" to "decoded_image" and also specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["label", "decoded_image"])
>>>
>>> # Rename column "image" to "decoded_image" and keep only this column.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["decoded_image"])
>>>
>>> # A simple example for mapping pyfunc. Renaming columns and specifying column order
>>> # work in the same way as the previous examples.
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x + 1)], input_columns=["data"])
>>>
>>> # 2) Map example with more than one operation.
>>>
>>> # Create a dataset where the images are decoded, then randomly color jittered.
>>> # decode_op takes column "image" as input and outputs one column. The column
>>> # outputted by decode_op is passed as input to random_jitter_op.
>>> # random_jitter_op will output one column. Column "image" will be replaced by
>>> # the column outputted by random_jitter_op (the very last operation). All other
>>> # columns are unchanged. Since column_order is not specified, the order of the
>>> # columns will remain the same.
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"])
>>>
>>> # Rename the column outputted by random_jitter_op to "image_mapped".
>>> # Specifying column order works in the same way as examples in 1).
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"],
... output_columns=["image_mapped"])
>>>
>>> # Map with multiple operations using pyfunc. Renaming columns and specifying column order
>>> # work in the same way as examples in 1).
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x * x), (lambda x: x - 1)], input_columns=["data"],
... output_columns=["data_mapped"])
>>>
>>> # 3) Example where number of input columns is not equal to number of output columns.
>>>
>>> # operations[0] is a lambda that takes 2 columns as input and outputs 3 columns.
>>> # operations[1] is a lambda that takes 3 columns as input and outputs 1 column.
>>> # operations[2] is a lambda that takes 1 column as input and outputs 4 columns.
>>> #
>>> # Note: The number of output columns of operation[i] must equal the number of
>>> # input columns of operation[i+1]. Otherwise, this map call will also result
>>> # in an error.
>>> operations = [(lambda x, y: (x, x + y, x + y + 1)),
... (lambda x, y, z: x * y * z),
... (lambda x: (x % 2, x % 3, x % 5, x % 7))]
>>>
>>> # Note: Since the number of input columns is not the same as the number of
>>> # output columns, the output_columns and column_order parameters must be
>>> # specified. Otherwise, this map call will also result in an error.
>>>
>>> dataset = ds.NumpySlicesDataset(data=([[0, 1, 2]], [[3, 4, 5]]), column_names=["x", "y"])
>>>
>>> # Propagate all columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod2", "mod3", "mod5", "mod7"])
>>>
>>> # Propagate some columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod7", "mod3", "col2"])
"""
return MapDataset(self, operations, input_columns, output_columns, column_order, num_parallel_workers,
python_multiprocessing, cache, callbacks, max_rowsize)
@check_filter
def filter(self, predicate, input_columns=None, num_parallel_workers=None):
"""
Filter the dataset by a predicate.
Note:
If input_columns is not provided or is empty, all columns will be used.
Args:
predicate (callable): Python callable which returns a boolean value. If it returns False, the element is filtered out.
input_columns (Union[str, list[str]], optional): List of names of the input columns. When
default=None, the predicate will be applied to all columns in the dataset.
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
Returns:
FilterDataset, dataset filtered.
Examples:
>>> # generator data (0 ~ 63)
>>> # filter out the data that is greater than or equal to 11
>>> dataset = dataset.filter(predicate=lambda data: data < 11, input_columns = ["data"])
"""
return FilterDataset(self, predicate, input_columns, num_parallel_workers)
@check_repeat
def repeat(self, count=None):
"""
Repeat this dataset `count` times. Repeat infinitely if the count is None or -1.
Note:
The order of using repeat and batch affects the number of batches. It is recommended that
the repeat operation be used after the batch operation.
Args:
count (int): Number of times the dataset is going to be repeated (default=None).
Returns:
RepeatDataset, dataset repeated.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Create a dataset where the dataset is repeated for 50 epochs
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where each epoch is shuffled individually
>>> dataset = dataset.shuffle(10)
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where the dataset is first repeated for
>>> # 50 epochs before shuffling. The shuffle operator will treat
>>> # the entire 50 epochs as one big dataset.
>>> dataset = dataset.repeat(50)
>>> dataset = dataset.shuffle(10)
"""
return RepeatDataset(self, count)
@check_skip
def skip(self, count):
"""
Skip the first N elements of this dataset.
Args:
count (int): Number of elements in the dataset to be skipped.
Returns:
SkipDataset, a dataset containing the original rows minus the skipped rows.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset which skips first 3 elements from data
>>> dataset = dataset.skip(3)
"""
return SkipDataset(self, count)
@check_take
def take(self, count=-1):
"""
Take at most the given number of elements from the dataset.
Note:
1. If count is greater than the number of elements in the dataset or equal to -1,
all the elements in dataset will be taken.
2. The order of using take and batch matters. If take comes before the batch operation,
it takes the given number of rows; otherwise it takes the given number of batches.
Args:
count (int, optional): Number of elements to be taken from the dataset (default=-1).
Returns:
TakeDataset, dataset taken.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset where the dataset includes 50 elements.
>>> dataset = dataset.take(50)
"""
return TakeDataset(self, count)
def _get_absolute_split_sizes(self, sizes):
"""
Internal method called by split to calculate absolute split sizes and to
do some error checking after calculating absolute split sizes.
Returns:
list, the absolute split sizes of the dataset.
"""
# Call get_dataset_size and validate the input here because we
# don't want to call it once in check_split and then again
# here.
dataset_size = self.get_dataset_size()
if dataset_size is None or dataset_size <= 0:
raise RuntimeError("dataset_size is unknown, unable to split.")
if not isinstance(sizes, list):
raise RuntimeError("sizes must be a list.")
all_int = all(isinstance(item, int) for item in sizes)
if all_int:
sizes_sum = sum(sizes)
if sizes_sum != dataset_size:
raise RuntimeError("Sum of split sizes {} is not equal to dataset size {}."
.format(sizes_sum, dataset_size))
return sizes
absolute_sizes = []
for item in sizes:
absolute_size = int(round(item * dataset_size))
if absolute_size == 0:
raise RuntimeError("Split percentage {} is too small.".format(item))
absolute_sizes.append(absolute_size)
absolute_sizes_sum = sum(absolute_sizes)
# if we still need more rows, give them to the first split.
# if we have too many rows, remove the extras from the first split that has
# enough rows.
size_difference = int(dataset_size - absolute_sizes_sum)
if size_difference > 0:
absolute_sizes[0] += size_difference
else:
for i, _ in enumerate(absolute_sizes):
if absolute_sizes[i] + size_difference > 0:
absolute_sizes[i] += size_difference
break
if sum(absolute_sizes) != dataset_size:
raise RuntimeError("Sum of calculated split sizes {} is not equal to dataset size {}."
.format(absolute_sizes_sum, dataset_size))
return absolute_sizes
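# Worked example for the rounding logic above (illustrative, not part of the original
# module): with dataset_size = 10 and sizes = [0.34, 0.33, 0.33], rounding gives
# [3, 3, 3], one row short, so the difference (+1) goes to the first split -> [4, 3, 3].
# With sizes = [0.26, 0.37, 0.37] rounding gives [3, 4, 4], one row too many, so the
# difference (-1) is applied to the first split that stays positive -> [2, 4, 4].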
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
This is a general purpose split function which can be called from any operator in the pipeline.
There is another, optimized split function, which will be called automatically if ds.split is
called where ds is a MappableDataset.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all input sizes does not equal the original dataset size, an
error will be raised.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will be raised. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference of K - sigma(round(fi * K)) will be added to the first
split.
- The sum of split sizes > K, the difference of sigma(round(fi * K)) - K will be removed from the first
large enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. Dataset cannot be sharded if split is going to be called.
2. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # TextFileDataset is not a mappable dataset, so this non-optimized split will be called.
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.TextFileDataset(text_file_dataset_dir, shuffle=False)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
rows_to_skip = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
if randomize:
# want to shuffle the same way every epoch before split
# in alter_tree, shuffle buffer is minimum 10000, so use 10000 here
ds = ds.shuffle(10000)
ds.reshuffle_each_epoch = False
if rows_to_skip > 0:
ds = ds.skip(rows_to_skip)
ds = ds.take(size)
splits.append(ds)
rows_to_skip += size
return tuple(splits)
@check_zip_dataset
def zip(self, datasets):
"""
Zip this dataset with the input datasets. Columns in the input datasets must have different
names.
Args:
datasets (Union[tuple, class Dataset]): A tuple of datasets or a single class Dataset
to be zipped together with this dataset.
Returns:
ZipDataset, dataset zipped.
Examples:
>>> # Create a dataset which is the combination of dataset and dataset_1
>>> dataset = dataset.zip(dataset_1)
"""
if isinstance(datasets, tuple):
datasets = (self, *datasets)
elif isinstance(datasets, Dataset):
datasets = (self, datasets)
else:
raise TypeError("Invalid datasets, expected Dataset object or tuple of Dataset, but got %s!" % datasets)
return ZipDataset(datasets)
@check_concat
def concat(self, datasets):
"""
Concatenate the dataset objects in the input list.
Performing "+" operation on dataset objects can achieve the same effect.
Note:
The column names, and the rank and type of the column data, must be the same across the input datasets.
Args:
datasets (Union[list, class Dataset]): A list of datasets or a single class Dataset
to be concatenated together with this dataset.
Returns:
ConcatDataset, dataset concatenated.
Examples:
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with "+" operator
>>> dataset = dataset_1 + dataset_2
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with concat operation
>>> dataset = dataset_1.concat(dataset_2)
"""
if isinstance(datasets, Dataset):
datasets = [self] + [datasets]
elif isinstance(datasets, list):
datasets = [self] + datasets
else:
raise TypeError("Invalid datasets, expected Dataset object or list of Dataset, but got %s!" % datasets)
return ConcatDataset(datasets)
@check_rename
def rename(self, input_columns, output_columns):
"""
Rename the columns in input datasets.
Args:
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
Returns:
RenameDataset, dataset renamed.
Examples:
>>> # dataset is an instance object of Dataset
>>> input_columns = ["input_col1", "input_col2", "input_col3"]
>>> output_columns = ["output_col1", "output_col2", "output_col3"]
>>>
>>> # Create a dataset where input_col1 is renamed to output_col1, and
>>> # input_col2 is renamed to output_col2, and input_col3 is renamed
>>> # to output_col3.
>>> dataset = dataset.rename(input_columns=input_columns, output_columns=output_columns)
"""
return RenameDataset(self, input_columns, output_columns)
@check_project
def project(self, columns):
"""
Project certain columns in input dataset.
The specified columns will be selected from the dataset and passed into
the pipeline with the order specified. The other columns are discarded.
Args:
columns(Union[str, list[str]]): List of names of the columns to project.
Returns:
ProjectDataset, dataset projected.
Examples:
>>> # dataset is an instance object of Dataset
>>> columns_to_project = ["column3", "column1", "column2"]
>>>
>>> # Create a dataset that consists of column3, column1, column2
>>> # in that order, regardless of the original order of columns.
>>> dataset = dataset.project(columns=columns_to_project)
"""
return ProjectDataset(self, columns)
def build_vocab(self, columns, freq_range, top_k, special_tokens, special_first):
"""
Create a Vocab from the source dataset.
This collects all the unique words in the dataset and returns a vocab
containing the top_k most frequent words (if top_k is specified).
Args:
columns(Union[str, list[str]]): Column names to get words from.
freq_range(tuple[int]): A tuple of integers (min_frequency, max_frequency). Words within the frequency
range will be stored.
Naturally 0 <= min_frequency <= max_frequency <= total_words. min_frequency/max_frequency
can be left as default, which corresponds to 0/total_words respectively.
top_k(int): Number of words to be built into the vocab. The top_k most frequent words are
taken. top_k is applied after freq_range. If there are fewer than top_k words, all words will be taken.
special_tokens(list[str]): A list of strings, each one is a special token.
special_first(bool): Whether special_tokens will be prepended or appended to the vocab. If special_tokens
is specified and special_first is left as default, special_tokens will be prepended.
Returns:
Vocab, vocab built from the dataset.
Examples:
>>> import numpy as np
>>>
>>> def gen_corpus():
... # key: word, value: number of occurrences, reason for using letters is so their order is apparent
... corpus = {"Z": 4, "Y": 4, "X": 4, "W": 3, "U": 3, "V": 2, "T": 1}
... for k, v in corpus.items():
... yield (np.array([k] * v, dtype='S'),)
>>> column_names = ["column1"]
>>> dataset = ds.GeneratorDataset(gen_corpus, column_names)
>>> dataset = dataset.build_vocab(columns=["column1"],
... freq_range=(1, 10), top_k=5,
... special_tokens=["<pad>", "<unk>"],
... special_first=True)
"""
vocab = cde.Vocab()
columns = replace_none(columns, [])
if not isinstance(columns, list):
columns = [columns]
freq_range = replace_none(freq_range, (0, 9223372036854775807))
if freq_range[0] is None:
freq_range = (0, freq_range[1])
if freq_range[1] is None:
freq_range = (freq_range[0], 9223372036854775807)
special_tokens = replace_none(special_tokens, [])
top_k = replace_none(top_k, 9223372036854775807)
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildVocabNode(ir_tree, vocab, columns, freq_range, top_k, special_tokens, special_first)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
def build_sentencepiece_vocab(self, columns, vocab_size, character_coverage, model_type, params):
"""
Create a SentencePieceVocab from the source dataset.
Args:
columns(list[str]): Column names to get words from.
vocab_size(int): Vocabulary size.
character_coverage(float): Percentage of characters covered by the model; must be between
0.98 and 1.0. Good defaults are 0.9995 for languages with rich character sets like
Japanese or Chinese, and 1.0 for other languages with small character sets
like English or Latin.
model_type(SentencePieceModel): Model type. Choose from unigram (default), bpe, char, or word.
The input sentence must be pretokenized when using word type.
params(dict): Any extra optional parameters of sentencepiece library according to your raw data
Returns:
SentencePieceVocab, vocab built from the dataset.
Examples:
>>> from mindspore.dataset.text import SentencePieceModel
>>>
>>> # DE_C_INTER_SENTENCEPIECE_MODE is a mapping dict
>>> from mindspore.dataset.text.utils import DE_C_INTER_SENTENCEPIECE_MODE
>>> dataset = ds.TextFileDataset("/path/to/sentence/piece/vocab/file", shuffle=False)
>>> dataset = dataset.build_sentencepiece_vocab(["text"], 5000, 0.9995,
... DE_C_INTER_SENTENCEPIECE_MODE[SentencePieceModel.UNIGRAM],
... {})
"""
vocab = cde.SentencePieceVocab()
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildSentenceVocabNode(ir_tree, vocab, columns, vocab_size, character_coverage, model_type,
params)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
def apply(self, apply_func):
"""
Apply a function to this dataset.
Args:
apply_func (function): A function that must take one 'Dataset' as an argument and
return a preprocessed 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Declare an apply_func function which returns a Dataset object
>>> def apply_func(data):
... data = data.batch(2)
... return data
>>>
>>> # Use apply to call apply_func
>>> dataset = dataset.apply(apply_func)
Raises:
TypeError: If apply_func is not a function.
TypeError: If apply_func doesn't return a Dataset.
"""
if not hasattr(apply_func, '__call__'):
raise TypeError("apply_func must be a function.")
dataset = apply_func(self)
if not isinstance(dataset, Dataset):
raise TypeError("apply_func must return a dataset.")
return dataset
@check_device_send
def device_que(self, send_epoch_end=True, create_data_info_queue=False):
"""
Return a TransferDataset that transfers data to the device.
Args:
send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create a queue which stores the
types and shapes of the data (default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The limit
per transfer is 256M.
Returns:
TransferDataset, dataset for transferring.
"""
return self.to_device(send_epoch_end=send_epoch_end, create_data_info_queue=create_data_info_queue)
@check_device_send
def to_device(self, send_epoch_end=True, create_data_info_queue=False):
"""
Transfer data from CPU to GPU or Ascend or other devices.
Args:
send_epoch_end (bool, optional): Whether to send the end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create a queue which stores the
types and shapes of the data (default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The limit
on data transmission per second is 256M.
Returns:
TransferDataset, dataset for transferring.
Raises:
RuntimeError: If distribution file path is given but failed to read.
"""
return TransferDataset(self, send_epoch_end, create_data_info_queue)
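# Hedged usage sketch (illustrative, not from the original docstrings): both helpers
# return a TransferDataset, and device_que() is a thin wrapper around to_device().
#
#   # dataset is assumed to be an instance object of Dataset
#   transfer = dataset.to_device(send_epoch_end=True)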
@check_save
def save(self, file_name, num_files=1, file_type='mindrecord'):
"""
Save the dynamic data processed by the dataset pipeline in a common dataset format.
Supported dataset format: 'mindrecord' only.
Implicit type casting exists when saving data as 'mindrecord'. The transform table shows how to do type casting.
.. list-table:: Implicit Type Casting when Saving as 'mindrecord'
:widths: 25 25 50
:header-rows: 1
* - Type in 'dataset'
- Type in 'mindrecord'
- Details
* - bool
- None
- Not supported
* - int8
- int32
-
* - uint8
- bytes(1D uint8)
- Drop dimension
* - int16
- int32
-
* - uint16
- int32
-
* - int32
- int32
-
* - uint32
- int64
-
* - int64
- int64
-
* - uint64
- None
- Not supported
* - float16
- float32
-
* - float32
- float32
-
* - float64
- float64
-
* - string
- string
- Multi-dimensional string not supported
Note:
1. To save the samples in order, set dataset's shuffle to False and num_files to 1.
2. Before calling the function, do not use the batch operator, repeat operator or data augmentation operators
with a random attribute in the map operator.
3. When array dimension is variable, one-dimensional arrays or
multi-dimensional arrays with variable dimension 0 are supported.
4. Mindrecord does not support DE_UINT64, multi-dimensional DE_UINT8 (drop dimension) nor
multi-dimensional DE_STRING.
Args:
file_name (str): Path to dataset file.
num_files (int, optional): Number of dataset files (default=1).
file_type (str, optional): Dataset format (default='mindrecord').
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
consumer = cde.PythonSaveToDisk(file_name, num_files, file_type)
consumer.Init(ir_tree)
runtime_context.AssignConsumer(consumer)
consumer.Save()
_set_dataset_permissions(file_name, num_files)
del api_tree
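# A minimal usage sketch for save() (illustrative only, not part of the original code).
# It assumes the pipeline was built with shuffle=False and without batch/repeat, as the
# notes in the docstring above require, and that the output path is writable:
#
#     dataset.save("/path/to/output.mindrecord", num_files=1, file_type='mindrecord')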
@check_tuple_iterator
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
"""
Create an iterator over the dataset. The datatype retrieved back will be a list of ndarrays.
To specify which columns to list and their order, use the columns parameter. If columns
is not provided, the order of the columns will remain unchanged.
Args:
columns (list[str], optional): List of columns to be used to specify the order of columns
(default=None, means all columns).
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
(default=-1, iterator can be iterated infinite number of epochs)
output_numpy (bool, optional): Whether or not to output NumPy datatype.
If output_numpy=False, iterator will output MSTensor (default=False).
do_copy (bool, optional): When the output data type is mindspore.Tensor,
this parameter selects the conversion method; set it to False for better performance (default=True).
Returns:
TupleIterator, tuple iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_tuple_iterator()
>>> for item in iterator:
... # item is a list
... print(type(item))
... break
<class 'list'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'tuple')
return TupleIterator(self, columns, num_epochs, output_numpy, do_copy)
@check_dict_iterator
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
"""
Create an iterator over the dataset. The data retrieved will be a dictionary datatype.
The order of the columns in the dictionary may not be the same as the original order.
Args:
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated
(default=-1, iterator can be iterated infinite number of epochs).
output_numpy (bool, optional): Whether or not to output NumPy datatype,
if output_numpy=False, iterator will output MSTensor (default=False).
Returns:
DictIterator, dictionary iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_dict_iterator()
>>> for item in iterator:
... # item is a dict
... print(type(item))
... break
<class 'dict'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'dict')
return DictIterator(self, num_epochs, output_numpy)
def __iter__(self):
"""Create an iterator over the dataset."""
return self.create_tuple_iterator(num_epochs=1)
@property
def input_indexs(self):
"""
Get Input Index Information
Returns:
tuple, tuple of the input index information.
Examples:
>>> # dataset is an instance object of Dataset
>>> # set input_indexs
>>> dataset.input_indexs = 10
>>> print(dataset.input_indexs)
10
"""
if self._input_indexs != ():
return self._input_indexs
# find input_indexes of children
children_input_index = [child.input_indexs for child in self.children]
# in case of more than one child, return the first input_indexes
for cix in children_input_index:
if cix != ():
return cix
# if all children's input_indexes are () or the node is a leaf
return self._input_indexs
@input_indexs.setter
def input_indexs(self, value):
self._input_indexs = value
def copy_batch_size(self, value):
self._batch_size = value
def _init_tree_getters(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.TreeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def __init_size_getter(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.DatasetSizeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def get_col_names(self):
"""
Return the names of the columns in dataset.
Returns:
list, list of column names in the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> col_names = dataset.get_col_names()
"""
if self._col_names is None:
runtime_getter = self._init_tree_getters()
self._col_names = runtime_getter[0].GetColumnNames()
self.close_pool()
runtime_getter[2].notify_watchdog()
return self._col_names
def output_shapes(self):
"""
Get the shapes of output data.
Returns:
list, list of shapes of each column.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_shapes = dataset.output_shapes()
"""
if self.saved_output_shapes is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_shapes
def output_types(self):
"""
Get the types of output data.
Returns:
list, list of data types.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_types = dataset.output_types()
"""
if self.saved_output_types is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_types
def get_dataset_size(self):
"""
Return the number of batches in an epoch.
Returns:
int, number of batches.
Examples:
>>> # dataset is an instance object of Dataset
>>> dataset_size = dataset.get_dataset_size()
"""
if self.dataset_size is None:
runtime_getter = self.__init_size_getter()
self.dataset_size = runtime_getter[0].GetDatasetSize(False)
self.close_pool()
runtime_getter[2].notify_watchdog()
return self.dataset_size
def set_dynamic_columns(self, columns=None):
"""
Set dynamic shape information of the source data; it should be set after the pipeline is defined.
Args:
columns (dict): A dict containing the shape information of each column in the dataset.
A value of :py:obj:`None` in shape[i] indicates that the data length of dimension i is dynamic.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
...     for i in range(1, 100):
...         yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
"""
if not isinstance(columns, dict):
raise TypeError("Pass a dict to set dynamic shape, example: {\"data1\": [16, None, 256]}")
self.dynamic_setting[0] = True
self.dynamic_setting[1] = columns
def dynamic_min_max_shapes(self):
"""
Get minimum and maximum data length of dynamic source data, for dynamic graph compilation.
Returns:
lists, min_shapes, max_shapes of source data.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
...     for i in range(1, 100):
...         yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
>>> min_shapes, max_shapes = dataset.dynamic_min_max_shapes()
"""
if self.saved_min_shapes is None or self.saved_max_shapes is None:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_min_shapes, self.saved_max_shapes
@staticmethod
def __check_dynamic_column_name(dynamic_columns, dataset_columns):
for column in dynamic_columns:
if column not in dataset_columns:
raise RuntimeError("dynamic column [" + column + "] does not match any column in dataset: " +
str(dataset_columns))
@staticmethod
def __check_dynamic_column_shape(data, col, dynamic_columns):
shape_mismatch = "dynamic column [" + col + "] with shape " + str(dynamic_columns[col]) + \
" does not match dataset column [" + col + "] with shape " + str(list(data[col].shape))
if data[col].ndim != len(dynamic_columns[col]):
raise RuntimeError(shape_mismatch)
for dim in range(len(dynamic_columns[col])):
if dynamic_columns[col][dim] is not None and dynamic_columns[col][dim] != data[col].shape[dim]:
raise RuntimeError(shape_mismatch)
def _dynamic_output_shapes(self):
"""
Get dynamic information of source data.
Returns:
lists, dynamic_shapes, min_shapes, max_shapes of source data.
"""
if not self.dynamic_setting[1]:
raise RuntimeError("dynamic_columns is not set, call set_dynamic_columns() by final Dataset Op.")
if self.saved_output_shapes is not None and self.saved_min_shapes is not None and \
self.saved_max_shapes is not None:
return self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes
logger.warning("Calculating dynamic shape of input data, this will take a few minutes...")
# Assume data1 shape is dynamic, data2 shape is fix
# {"data1": [batch_size, None, feat_len], "data2": [batch_size, feat_len]}
dynamic_columns = self.dynamic_setting[1]
# ["data1", "data2"]
dataset_columns = self.get_col_names()
Dataset.__check_dynamic_column_name(dynamic_columns, dataset_columns)
# Shape[1] of data1 is variable
# {"data1": {(batch_size, 100, feat_len), (16, 200, 83)}, "data2": {(batch_size, feat_len)}}
column_shape_set = {col: set() for col in dataset_columns}
dataset_size_counter = 0
for data in self.create_dict_iterator(num_epochs=1, output_numpy=True):
dataset_size_counter += 1
for col in data.keys():
if col in dynamic_columns:
Dataset.__check_dynamic_column_shape(data, col, dynamic_columns)
column_shape_set[col].add(tuple(data[col].shape))
# we get dataset_size after dryrun
self.dataset_size = dataset_size_counter
min_shapes, max_shapes, dynamic_shapes = list(), list(), list()
for col, shape_set in column_shape_set.items():
if len(shape_set) > 1:
if col not in dynamic_columns:
raise RuntimeError("column [" + col + "] has dynamic shape but not set by set_dynamic_columns()" +
", shapes of [" + col + "]: " + str(list(shape_set)))
shape_npy = np.array(list(shape_set))
max_shape = shape_npy.max(axis=0)
min_shape = shape_npy.min(axis=0)
# Set min shape to 1 due to unknown shuffle
min_shape = np.where(np.equal(dynamic_columns[col], None), 1, min_shape)
# Set dynamic dim to -1 for ME
dynamic_shape = np.where(np.equal(dynamic_columns[col], None), -1, dynamic_columns[col])
max_shapes.append(max_shape.tolist())
min_shapes.append(min_shape.tolist())
dynamic_shapes.append(dynamic_shape.tolist())
else:
# Also append fix shape to keep order of column shape
fix_shape = list(list(shape_set)[0])
max_shapes.append(fix_shape)
min_shapes.append(fix_shape)
dynamic_shapes.append(fix_shape)
if col in dynamic_columns:
logger.warning("column [" + col + "] has no dynamic shape but set by set_dynamic_columns()")
# Set min shape to 1 due to unknown shuffle
min_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), 1, fix_shape).tolist()
# Set dynamic dim to -1 for ME
dynamic_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), -1, fix_shape).tolist()
return dynamic_shapes, min_shapes, max_shapes
def num_classes(self):
"""
Get the number of classes in a dataset.
Returns:
int, number of classes.
Examples:
>>> # dataset is an instance object of Dataset
>>> num_classes = dataset.num_classes()
"""
if self._num_classes is None:
runtime_getter = self._init_tree_getters()
self._num_classes = runtime_getter[0].GetNumClasses()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self._num_classes == -1:
return None
return self._num_classes
def get_sync_notifiers(self):
if self.children:
return self.children[0].get_sync_notifiers()
return {}
def disable_sync(self):
if self.children:
return self.children[0].disable_sync()
return {}
def is_sync(self):
if self.children:
return self.children[0].is_sync()
return False
def sync_update(self, condition_name, num_batch=None, data=None):
"""
Release a blocking condition and trigger callback with given data.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (Union[int, None]): The number of batches (rows) that are released.
When num_batch is None, it will default to the number specified by the
sync_wait operator (default=None).
data (Any): The data passed to the callback, user defined (default=None).
"""
if (not isinstance(num_batch, int) and num_batch is not None) or \
(isinstance(num_batch, int) and num_batch <= 0):
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Sync_update batch size can only be positive integer, got : {}.".format(num_batch))
notifiers_dict = self.get_sync_notifiers()
if not isinstance(condition_name, str):
raise TypeError("Argument condition_name with value {} is not of type str, but got {}."
.format(condition_name, type(condition_name)))
if condition_name not in notifiers_dict:
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Condition name not found.")
if num_batch is not None:
num_batch *= self.get_batch_size()
notifiers_dict[condition_name](num_batch, data)
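# A hedged sketch of the sync_wait/sync_update handshake (illustrative only, not part of
# the original code). It assumes `dataset` is a Dataset on which sync_wait() was called,
# that "policy" is a user-chosen condition name, and that update_policy and train_step
# are user-defined callables:
#
#     dataset = dataset.sync_wait(condition_name="policy", num_batch=1, callback=update_policy)
#     dataset = dataset.batch(batch_size)
#     for data in dataset.create_dict_iterator(num_epochs=1):
#         train_step(data)
#         dataset.sync_update(condition_name="policy")  # release the next num_batch batches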
def get_batch_size(self):
"""
Return the size of batch.
Returns:
int, the number of data in a batch.
Examples:
>>> # dataset is an instance object of Dataset
>>> batch_size = dataset.get_batch_size()
"""
if self._batch_size is None:
runtime_getter = self._init_tree_getters()
self._batch_size = runtime_getter[0].GetBatchSize()
if self._batch_size is None:
self._batch_size = 1
return self._batch_size
def get_repeat_count(self):
"""
Get the number of times the dataset is repeated in RepeatDataset (default is 1).
Returns:
int, the count of repeat.
Examples:
>>> # dataset is an instance object of Dataset
>>> repeat_count = dataset.get_repeat_count()
"""
if self._repeat_count is None:
runtime_getter = self._init_tree_getters()
self._repeat_count = runtime_getter[0].GetRepeatCount()
if self._repeat_count is None:
self._repeat_count = 1
return self._repeat_count
def get_class_indexing(self):
"""
Return the class index.
Returns:
dict, a str-to-int mapping from label name to index.
dict, a str-to-list<int> mapping from label name to index for Coco ONLY. The second number
in the list is used to indicate the super category.
Examples:
>>> # dataset is an instance object of Dataset
>>> class_indexing = dataset.get_class_indexing()
"""
if self.children:
return self.children[0].get_class_indexing()
return {}
def reset(self):
"""Reset the dataset for next epoch."""
def is_shuffled(self):
"""Returns True if the dataset or its children is shuffled."""
for input_dataset in self.children:
if input_dataset.is_shuffled():
return True
return False
def is_sharded(self):
"""Returns True if the dataset or its children is sharded."""
for input_dataset in self.children:
if input_dataset.is_sharded():
return True
return False
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def post_parse(self, ir_node):
if self.cache:
ir_node = ir_node.set_cache_client(self.cache.cache_client)
if self.num_parallel_workers:
ir_node = ir_node.set_num_workers(self.num_parallel_workers)
return ir_node
class SourceDataset(Dataset):
"""
Abstract class to represent a source dataset which produces content to the data pipeline.
"""
def __init__(self, num_parallel_workers=None, num_samples=None, shuffle=True, num_shards=None, shard_id=None,
cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, cache=cache)
self.num_samples = replace_none(num_samples, 0)
self.num_shards = replace_none(num_shards, 1)
self.shard_id = replace_none(shard_id, 0)
if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or "
"'Shuffle.FILES' or 'Shuffle.INFILE'.")
self.shuffle_flag = 2 # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
self.shuffle_flag = 2 # Global shuffle
else:
self.shuffle_flag = 0 # No shuffle
else:
if shuffle == Shuffle.GLOBAL:
self.shuffle_flag = 2 # Global shuffle
elif shuffle == Shuffle.FILES:
self.shuffle_flag = 1 # Files shuffle
elif shuffle == Shuffle.INFILE:
self.shuffle_flag = 3 # Infile shuffle
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
@staticmethod
def _find_files(patterns):
"""
Utility function to search for files with the given glob patterns.
Args:
patterns (Union[str, list[str]]): String or list of patterns to be searched.
Returns:
list, list of files.
"""
if not isinstance(patterns, list):
patterns = [patterns]
file_list = []
unmatched_patterns = []
for pattern in patterns:
matches = [match for match in glob.glob(pattern, recursive=True) if os.path.isfile(match)]
if matches:
file_list.extend(matches)
else:
unmatched_patterns.append(pattern)
if unmatched_patterns:
raise ValueError("The following patterns did not match any files: {}.".format(unmatched_patterns))
if file_list: # not empty
return file_list
raise ValueError("The list of path names matching the patterns is empty.")
def is_shuffled(self):
return self.shuffle_flag > 0
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return False
class MappableDataset(SourceDataset):
"""
Abstract class to represent a source dataset which supports use of samplers.
"""
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def __init__(self, num_parallel_workers=None, sampler=None, num_samples=None, shuffle=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.shuffle_flag = replace_none(shuffle, True)
self.sampler = samplers.select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
def add_sampler(self, new_sampler):
"""
Add a sampler for the current dataset.
Args:
new_sampler (Sampler): The sampler to be added as the parent sampler for current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.add_sampler(new_sampler)
"""
# note: By adding a sampler, the sampled IDs will flow to new_sampler
# after first passing through the current samplers attached to this dataset.
self.dataset_size = None
new_sampler.add_child(self.sampler)
self.sampler = new_sampler
def use_sampler(self, new_sampler):
"""
Make the current dataset use the new_sampler provided by other API.
Args:
new_sampler (Sampler): The sampler to use for the current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.use_sampler(new_sampler)
"""
if new_sampler is None:
raise TypeError("Input sampler can not be None.")
if not isinstance(new_sampler, (samplers.BuiltinSampler, samplers.Sampler)):
raise TypeError("Input sampler is not an instance of a sampler.")
self.dataset_size = None
self.sampler = self.sampler.child_sampler
self.add_sampler(new_sampler)
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all sizes does not equal the original dataset size, an
error will occur.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will occur. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference will be added to the first split.
- The sum of split sizes > K, the difference will be removed from the first large
enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. There is an optimized split function, which will be called automatically when the dataset
that calls this function is a MappableDataset.
2. Dataset should not be sharded if split is going to be called. Instead, create a
DistributedSampler and specify a split to shard after splitting. If the dataset is
sharded after a split, it is strongly recommended to set the same seed in each instance
of execution, otherwise each shard may not be part of the same split (see Examples).
3. It is strongly recommended not to shuffle the dataset, but to use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch. Furthermore, if sharding occurs after split, each
shard may not be part of the same split.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir, shuffle=False)
>>>
>>> # Set the seed, and tell split to use this seed when randomizing.
>>> # This is needed because sharding will be done later
>>> ds.config.set_seed(58)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
>>>
>>> # To shard the train dataset, use a DistributedSampler
>>> train_sampler = ds.DistributedSampler(10, 2)
>>> train_dataset.use_sampler(train_sampler)
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
current_split_start_index = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
ds.dataset_size = None
if randomize:
# want to shuffle the same way every epoch before split, we are assuming
# that the user will call set_seed
random_sampler = samplers.RandomSampler()
random_sampler.reshuffle_each_epoch = False
ds.add_sampler(random_sampler)
subset_sampler = samplers.SequentialSampler(current_split_start_index, size)
ds.add_sampler(subset_sampler)
# add sequential sampler, so that if user calls use_sampler, we will
# get rid of the sequential sampler instead of something we need
ds.add_sampler(samplers.SequentialSampler())
splits.append(ds)
current_split_start_index += size
return tuple(splits)
class BucketBatchByLengthDataset(Dataset):
"""
The result of applying BucketBatchByLength operator to the input dataset.
"""
def __init__(self, input_dataset, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function,
pad_info, pad_to_bucket_boundary, drop_remainder):
super().__init__(children=input_dataset)
self.column_names = to_list(column_names)
self.bucket_boundaries = replace_none(bucket_boundaries, [])
self.bucket_batch_sizes = replace_none(bucket_batch_sizes, [])
self.element_length_function = element_length_function
self.pad_info = replace_none(pad_info, {})
self.pad_to_bucket_boundary = replace_none(pad_to_bucket_boundary, False)
self.drop_remainder = replace_none(drop_remainder, False)
def parse(self, children=None):
return cde.BucketBatchByLengthNode(children[0], self.column_names, self.bucket_boundaries,
self.bucket_batch_sizes, self.element_length_function, self.pad_info,
self.pad_to_bucket_boundary, self.drop_remainder)
class BatchDataset(Dataset):
"""
The result of applying Batch operator to the input dataset.
Args:
input_dataset (Dataset): Input Dataset to be batched.
batch_size (Union[int, function]): The number of rows each batch is created with. An
int or callable which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last
possibly incomplete batch (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel (default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch of
Tensors on a given column. The number of lists should match the number of entries in input_columns. The
last parameter of the callable must always be a BatchInfo object.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list must
match the signature of the per_batch_map callable.
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
will pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0.
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
"""
def __init__(self, input_dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
if BatchDataset._is_ancestor_of_repeat(input_dataset):
logger.warning("Repeat is located before batch, data from two epochs can be batched together.")
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
# if batch_size is callable, set batch_size to 1 and batch_size_func to that callable function
self.batch_size = batch_size if not callable(batch_size) else 1
self.batch_size_func = None if not callable(batch_size) else batch_size
self.drop_remainder = replace_none(drop_remainder, False)
self.per_batch_map = per_batch_map
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = to_list(column_order)
self.pad = bool(pad_info is not None)
self.pad_info = replace_none(pad_info, dict())
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.hook = None
self.pids = []
self.eot = None
self.watch_dog = None
self.max_rowsize = max_rowsize
def parse(self, children=None):
return cde.BatchNode(children[0], self.batch_size, self.drop_remainder, self.pad, self.input_columns,
self.output_columns, self.column_order, self.batch_size_func, self.per_batch_map,
self.pad_info)
@staticmethod
def _is_ancestor_of_repeat(dataset):
"""
Utility function to find the case where repeat is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether repeat is used before batch.
"""
if isinstance(dataset, RepeatDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | BatchDataset._is_ancestor_of_repeat(input_dataset)
return flag
@staticmethod
def _update_batch_size_for_syncwait(dataset, batch_size):
"""
Utility function to notify batch size to sync_wait.
Args:
dataset (Dataset): Dataset to be checked.
batch_size (int): batch size to notify.
"""
if isinstance(dataset, SyncWaitDataset):
dataset.update_sync_batch_size(batch_size)
for input_dataset in dataset.children:
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("per_batch_map", "batch_size_func", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of the Dataset object is created prior to iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
if self.per_batch_map is None:
logger.warning("per_batch_map is None so python_multiprocessing does not work.")
return
arg_q_list = []
res_q_list = []
# If user didn't specify num_parallel_workers, set it to default
if self.num_parallel_workers is not None:
num_parallel = self.num_parallel_workers
else:
num_parallel = get_num_parallel_workers()
if get_enable_shared_mem():
_check_shm_usage(num_parallel, 1, self.max_rowsize * self.batch_size, 2)
for _ in range(num_parallel):
arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))
res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))
# Construct pool with the callable list
# The callable list and _pyfunc_worker_init are used to pass lambda functions into subprocesses
self.process_pool = multiprocessing.Pool(processes=num_parallel,
initializer=_pyfunc_worker_init,
initargs=([self.per_batch_map], arg_q_list, res_q_list))
idx = 0
global _OP_NAME, _OP_PROCESS, _LOCK
op_id = _OP_NAME[str(self)]
process_id = {op_id: [self.num_parallel_workers, set()]}
# obtain process id from multiprocessing.pool
for pool in self.process_pool._pool: # pylint: disable=W0212
process_id[op_id][1].add(pool.pid)
self.pids.append(pool.pid)
with _LOCK:
_OP_PROCESS.update(process_id)
# Wrap per_batch_map into _PythonCallable
self.per_batch_map = _PythonCallable(self.per_batch_map, idx, self.process_pool, arg_q_list, res_q_list)
self.hook = _ExceptHookHandler()
# batch will launch a watch dog thread to monitor subprocesses
self._launch_watch_dog()
atexit.register(_mp_pool_exit_preprocess)
# If the Python version is 3.8 or higher, we need to close the pool in atexit to avoid an unclean pool teardown.
if sys.version_info >= (3, 8):
atexit.register(self.process_pool.close)
else:
if self.per_batch_map is not None:
self.per_batch_map = FuncWrapper(self.per_batch_map)
def _launch_watch_dog(self):
if platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))
self.watch_dog.daemon = True
self.watch_dog.start()
def _abort_watchdog(self):
if not self.eot.is_set():
self.eot.set()
def __del__(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
class BatchInfo(cde.CBatchInfo):
"""
The information object associates with the current batch of tensors.
"""
def get_batch_num(self):
"""
Return the batch number of the current batch.
"""
return
def get_epoch_num(self):
"""
Return the epoch number of the current batch.
"""
return
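# A hedged sketch of how BatchInfo is typically consumed (illustrative only, not part of
# the original code). Both the callable batch_size and the per_batch_map described in the
# BatchDataset docstring receive a BatchInfo instance as their last argument; the column
# and function names below are hypothetical:
#
#     def grow_batch_size(batch_info):
#         # double the batch size every epoch, starting from 8
#         return 8 * (2 ** batch_info.get_epoch_num())
#
#     def double_column(col1_batch, batch_info):
#         # col1_batch is the list of tensors for column "col1" in the current batch
#         return ([sample * 2 for sample in col1_batch],)
#
#     dataset = dataset.batch(batch_size=grow_batch_size, per_batch_map=double_column,
#                             input_columns=["col1"])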
class BlockReleasePair:
"""
The blocking condition class used by SyncWaitDataset.
Args:
init_release_rows (int): Number of lines to allow through the pipeline.
callback (function): The callback function that will be called when release is called (default=None).
"""
def __init__(self, init_release_rows, callback=None):
if isinstance(init_release_rows, int) and init_release_rows <= 0:
raise ValueError("release_rows need to be greater than 0.")
self.row_count = -init_release_rows
self.cv = threading.Condition()
self.callback = callback
self.default_rows = init_release_rows
self.disable = False
def __deepcopy__(self, memodict):
return self
def reset(self):
with self.cv:
self.row_count = -self.default_rows
self.cv.notify_all()
def update_batched_size(self, batch_size):
# sanity check
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("batch_size need to be greater than 0.")
# should only use before the pipeline creates
self.row_count *= batch_size
self.default_rows *= batch_size
def block_func(self):
"""
Function for handling the blocking condition.
Returns:
bool, True.
"""
with self.cv:
# if disable is True, the predicate always evaluates to True
not_time_out = self.cv.wait_for(lambda: (self.row_count < 0 or self.disable),
timeout=get_callback_timeout())
# not_time_out will be False if a timeout occurs
if not not_time_out:
logger.warning("Timeout happened in sync_wait, maybe dataset.sync_update(condition=...) "
"is not added after dataset.create_dict_iterator(...), now disabling lock.")
self.disable = True
self.row_count += 1
return True
def release_func(self, pass_rows=None, data=None):
with self.cv:
if pass_rows is None:
pass_rows = self.default_rows
self.row_count -= pass_rows
if self.callback is not None:
self.callback(data)
self.cv.notify_all()
def disable_lock(self):
with self.cv:
self.disable = True
self.cv.notify_all()
class SyncWaitDataset(Dataset):
"""
The result of adding a blocking condition to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to apply flow control.
num_batch (int): Number of batches without blocking at the start of each epoch.
condition_name (str): Condition name that is used to toggle sending next row.
callback (function): Callback function that will be invoked when sync_update is called (default=None).
Raises:
RuntimeError: If condition name already exists.
"""
def __init__(self, input_dataset, condition_name, num_batch, callback=None):
super().__init__(children=input_dataset)
# set to the default value, waiting for the batch to update it
self._condition_name = condition_name
if isinstance(num_batch, int) and num_batch <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair = BlockReleasePair(num_batch, callback)
if self._condition_name in self.children[0].get_sync_notifiers():
raise RuntimeError("Condition name is already in use.")
logger.info("Please remember to add dataset.sync_update(condition=%s), otherwise hanging will result. "
"If dataset.sync_update(condition=%s) has already been added, you can ignore the info.",
condition_name, condition_name)
def parse(self, children=None):
return cde.SyncWaitNode(children[0], self._condition_name, self._pair.block_func)
def get_sync_notifiers(self):
return {**self.children[0].get_sync_notifiers(), **{self._condition_name: self._pair.release_func}}
def is_sync(self):
return True
def update_sync_batch_size(self, batch_size):
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair.update_batched_size(batch_size)
def disable_sync(self):
logger.info("Disabling Sync")
self._pair.disable_lock()
@staticmethod
def _is_ancestor_of_batch(dataset):
"""
Utility function to find the case where sync_wait is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether sync_wait is used before batch.
"""
if isinstance(dataset, BatchDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | SyncWaitDataset._is_ancestor_of_batch(input_dataset)
return flag
def iterator_bootstrap(self):
self._pair.reset()
class ShuffleDataset(Dataset):
"""
The result of applying Shuffle operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be shuffled.
buffer_size (int): Size of the buffer.
Raises:
RuntimeError: If sync operators exist before shuffle.
"""
def __init__(self, input_dataset, buffer_size):
super().__init__(children=input_dataset)
self.buffer_size = buffer_size
self.reshuffle_each_epoch = True
if self.is_sync():
raise RuntimeError("No shuffle after sync operators.")
def parse(self, children=None):
return cde.ShuffleNode(children[0], self.buffer_size, self.reshuffle_each_epoch)
def is_shuffled(self):
return True
# This wait function is for cleaning zombie subprocesses
def wait_pid():
"""
This function is used by the main process to release subprocess resources.
"""
try:
while True:
child_pid, _ = os.waitpid(-1, os.WNOHANG)
if child_pid == 0:
break
except OSError:
# waitpid may fail for various reasons, so we ignore this error
pass
# Dataset needs a _watch_dog thread to monitor forked multiprocessing workers,
# and the thread can't be a member function, otherwise Python won't collect and release resources.
def _watch_dog(eot, pids):
"""
This thread is for monitoring subprocesses forked by GeneratorDataset/map/batch
"""
while not eot.is_set():
subprocess_exit_num = 0
# Monitor and count how many subprocesses have already exited
for pid in pids:
try:
p = psutil.Process(pid)
if p.status() == psutil.STATUS_ZOMBIE:
subprocess_exit_num += 1
except psutil.NoSuchProcess:
subprocess_exit_num += 1
# If any subprocess has exited, we will wait for 30s and do some waitpid operations
if subprocess_exit_num > 0:
start = time.time()
while time.time() - start < 30:
# We need to distinguish between get_dataset_size or training finishing normally and a hang scenario.
# If get_dataset_size or training finished normally, _stop_subprocess can be executed and
# self.need_abort can be set to True. If the main process hangs in get(), self.need_abort
# will never be set to True, so we wait for 30s and then kill the main process
if eot.is_set():
return
# Sometimes a subprocess may be a zombie, so during the 30s we can wait and do some useful tasks (waitpid).
wait_pid()
# multiprocessing.Queue may hang in .get() forever when the put() process was killed.
# We have to exit the main process, otherwise the main process will hang.
logger.critical("The subprocess of dataset may exit unexpected or be killed, "
"main process will exit.")
os.kill(os.getpid(), signal.SIGTERM)
# Pyfunc collection for multiprocess pyfunc
# This global variable will only be used within subprocesses
_GLOBAL_PYFUNC_LIST = []
_ARGS_QUEUE = []
_RET_QUEUE = []
_OP_NAME = dict()
_OP_PROCESS = dict()
_LOCK = threading.Lock()
# Pyfunc worker init function
# The Python multiprocessing library forbids sending lambda functions through a pipe.
# This init function allows us to add all Python functions to a global collection and then fork afterwards.
def _pyfunc_worker_init(pyfunc_list, args_queue, ret_queue):
global _GLOBAL_PYFUNC_LIST
global _ARGS_QUEUE
global _RET_QUEUE
_GLOBAL_PYFUNC_LIST = pyfunc_list
_ARGS_QUEUE = args_queue
_RET_QUEUE = ret_queue
# Pyfunc worker execution function
# All exceptions will be raised to main processes
def _pyfunc_worker_exec(index, qid, *args):
"""
Internal function for calling a certain pyfunc in a Python subprocess.
"""
# Some threads in multiprocessing.pool can't process the SIGINT signal
# and will hang, so Ctrl+C is passed to the parent process.
signal.signal(signal.SIGINT, signal.SIG_IGN)
if qid != -1:
# Pass arguments through the Queue instead of directly to remote process
args = _ARGS_QUEUE[qid].get()
try:
r = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
return ExceptionHandler(where="in map(or batch) worker and execute python function")
if isinstance(r, tuple):
_RET_QUEUE[qid].put(r)
else:
_RET_QUEUE[qid].put((r,))
return [qid]
# not using shared memory for passing arguments, call function directly
result = None
try:
result = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
result = ExceptionHandler(where="in map(or batch) worker and execute python function")
return result
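# Descriptive summary of the worker protocol above (comment added for clarity, based on
# _pyfunc_worker_exec and the _PythonCallable wrapper below): when shared memory is enabled,
# the parent thread puts the arguments into _ARGS_QUEUE[qid], the worker reads them, runs
# _GLOBAL_PYFUNC_LIST[index](*args), puts the result tuple into _RET_QUEUE[qid], and returns
# [qid] so the parent knows which result queue to read; when shared memory is disabled
# (qid == -1), arguments and results travel through the multiprocessing pipe directly.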
# PythonCallable wrapper for multiprocess pyfunc
class _PythonCallable:
"""
Internal Python function wrapper for multiprocessing pyfunc.
"""
def __init__(self, py_callable, idx, pool=None, arg_q=None, res_q=None):
# Original Python callable from user.
self.py_callable = py_callable
# Process pool created for current iterator.
self.pool = pool
# Python callable index for subprocess _GLOBAL_PYFUNC_LIST
self.idx = idx
if pool is not None:
self.queuemap = {}
self.arg_q = arg_q
self.res_q = res_q
self.next_queue = 0
def __call__(self, *args):
if self._pool_is_running() and check_iterator_cleanup() is False:
result, qid, ret = self._send(*args)
if ret:
return result
# todo this check might be wrong
while check_iterator_cleanup() is False:
try:
return self._receive(result, qid)
except multiprocessing.TimeoutError:
continue
except KeyboardInterrupt:
_set_iterator_cleanup()
self.pool.close()
self.pool.join()
raise Exception("Multiprocess MapOp worker receives KeyboardInterrupt.")
return (None,)
# Invoke original Python callable in master process in case the pool is gone.
return self.py_callable(*args)
def to_json(self):
return self.py_callable.to_json()
def _send(self, *args):
"""
The map/batch operator will use the multiprocessing-pool apply_async interface to execute the python function
in a subprocess; apply_async releases the GIL temporarily. For better performance, we use the shared memory
feature and pass a shared queue instead of multiprocess args.
"""
ret = False
qid = None
if self.arg_q != []:
tid = threading.get_ident()
# Need to register each thread to use a different queue to send data to pool
if tid not in self.queuemap:
qid = self.next_queue
self.next_queue = self.next_queue + 1
self.queuemap[tid] = qid
else:
qid = self.queuemap[tid]
self.arg_q[qid].put(args)
# This call will send the tensors along with Python callable index to the process pool.
# Block, yield GIL. Current thread will reacquire GIL once result is returned.
if self._pool_is_running() and check_iterator_cleanup() is False:
result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, qid, []])
else:
ret = True
result = self.py_callable(*args)
else:
result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, -1, *args])
return result, qid, ret
def _receive(self, result, qid):
"""
The map/batch operator will use the multiprocessing-pool get interface to sync output data from a subprocess;
the get interface reacquires the GIL. For better performance, we use the shared memory feature and get data from
the shared queue directly.
"""
if self.arg_q != []:
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
if r[0] != qid:
raise Exception("In PyCallable, got results from wrong thread")
r = self.res_q[qid].get()
return r
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
return r
def _pool_is_running(self):
# note here: the RUN state of python3.7 and python3.8 is different:
# python3.7: RUN = 0
# python3.8: RUN = "RUN"
# so we use self.pool._state == RUN instead and we can't use _state == 0 any more.
if self.pool is not None and self.pool._state == RUN: # pylint: disable=W0212
return True
return False
def _mp_pool_exit_preprocess():
if check_iterator_cleanup() is False:
# Set the iterator_cleanup flag to True before exiting, and wait 3s for all apply_async
# applied to the multiprocessing task to prevent multiprocessing from hang when exiting
_set_iterator_cleanup()
time.sleep(3)
class _ExceptHookHandler:
def __init__(self):
sys.excepthook = self.__handler_exception
def __handler_exception(self, ex_type, value, tb):
logger.critical("Uncaught exception: ", exc_info=(ex_type, value, tb))
_mp_pool_exit_preprocess()
class MapDataset(Dataset):
"""
The result of applying the Map operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
operations (TensorOp): A function mapping a nested structure of tensors
to another nested structure of tensor (default=None).
input_columns (Union[str, list[str]]): List of names of the input columns
(default=None, the operations will be applied on the first columns in the dataset).
The size of the list should match the number of inputs of the first operator.
output_columns (Union[str, list[str]], optional): List of names of the output columns.
The size of the list should match the number of outputs of the last operator
(default=None, output columns will be the input columns, i.e., the columns will
be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computationally heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None)
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
Raises:
ValueError: If len(input_columns) != len(output_columns) and column_order is not specified.
"""
def __init__(self, input_dataset, operations=None, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None, max_rowsize=16):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers, cache=cache)
self.operations = to_list(operations)
self.operations = py_transforms.Compose.reduce(self.operations)
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = replace_none(column_order, [])
# If output_columns were not provided then use input_columns
self.output_columns = self.input_columns if not self.output_columns else self.output_columns
if self.input_columns and self.output_columns \
and len(self.input_columns) != len(self.output_columns) \
and not self.column_order:
raise ValueError("When length of input_columns and output_columns are not equal,"
" column_order must be specified.")
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.hook = None
self.pids = []
self.eot = None
self.watch_dog = None
self.callbacks = to_list(callbacks)
self.max_rowsize = max_rowsize
def parse(self, children=None):
operations = []
for op in self.operations:
if op and getattr(op, 'parse', None):
operations.append(op.parse())
else:
operations.append(op)
callbacks = [cb.create_runtime_obj() for cb in self.callbacks]
return cde.MapNode(children[0], operations, self.input_columns, self.output_columns, self.column_order,
callbacks)
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("operations", "callbacks", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of the Dataset object is created prior to iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
iter_specific_operations = []
callable_list = []
arg_q_list = []
res_q_list = []
# If user didn't specify num_parallel_workers, set it to default
num_parallel = get_num_parallel_workers()
if self.num_parallel_workers is not None:
num_parallel = self.num_parallel_workers
if get_enable_shared_mem():
_check_shm_usage(num_parallel, 1, self.max_rowsize, 2)
for _ in range(num_parallel):
arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))
res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))
# Pass #1, look for Python callables and build list
for op in self.operations:
# our C transforms are now callable and should not be run in the Python multiprocessing pool
if MapDataset.__operation_valid_for_multiprocessing(op):
callable_list.append(op)
if callable_list:
# Construct pool with the callable list
# The callable list and _pyfunc_worker_init are used to pass lambda functions into subprocesses
self.process_pool = multiprocessing.Pool(processes=num_parallel,
initializer=_pyfunc_worker_init,
initargs=(callable_list, arg_q_list, res_q_list))
# Pass #2
idx = 0
global _OP_NAME, _OP_PROCESS, _LOCK
op_id = _OP_NAME[str(self)]
# obtain process id from multiprocessing.pool
process_id = {op_id: [self.num_parallel_workers, set()]}
for pool in self.process_pool._pool: # pylint: disable=W0212
process_id[op_id][1].add(pool.pid)
self.pids.append(pool.pid)
with _LOCK:
_OP_PROCESS.update(process_id)
for op in self.operations:
# our C transforms are now callable and should not be run in the Python multiprocessing pool
if MapDataset.__operation_valid_for_multiprocessing(op):
# Wrap Python callable into _PythonCallable
iter_specific_operations.append(_PythonCallable(op, idx, self.process_pool,
arg_q_list, res_q_list))
idx += 1
else:
# CPP ops remain the same
iter_specific_operations.append(op)
self.operations = iter_specific_operations
self.hook = _ExceptHookHandler()
# Map multiprocessing will launch a watch dog thread to monitor subprocesses
self._launch_watch_dog()
atexit.register(_mp_pool_exit_preprocess)
# If the Python version is 3.8 or higher, we need to close the pool in atexit to avoid an unclean pool teardown.
if sys.version_info >= (3, 8):
atexit.register(self.process_pool.close)
@staticmethod
def __operation_valid_for_multiprocessing(op):
if callable(op) and str(op).find("c_transform") < 0:
return True
return False
def _launch_watch_dog(self):
if platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))
self.watch_dog.daemon = True
self.watch_dog.start()
def _abort_watchdog(self):
if not self.eot.is_set():
self.eot.set()
def __del__(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
self.process_pool.join()
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
class FilterDataset(Dataset):
"""
The result of applying filter predicate to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
predicate (callable): Python callable which returns a boolean value. If it returns False, the element is filtered out.
input_columns (Union[str, list[str]], optional): List of names of the input columns
(default=None, the predicate will be applied to all columns in the dataset).
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
"""
def __init__(self, input_dataset, predicate, input_columns=None, num_parallel_workers=None):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
self.predicate = lambda *args: bool(predicate(*args))
self.input_columns = to_list(input_columns)
def parse(self, children=None):
return cde.FilterNode(children[0], self.predicate, self.input_columns)
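# A hedged usage sketch for the filter operation that produces a FilterDataset (illustrative
# only, not part of the original code). It assumes `dataset` has a numeric column named
# "label"; rows for which the predicate returns False are dropped:
#
#     dataset = dataset.filter(predicate=lambda label: label < 5,
#                              input_columns=["label"],
#                              num_parallel_workers=1)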
class RepeatDataset(Dataset):
"""
The result of applying Repeat operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be repeated.
count (int): Number of times the dataset will be repeated (default=-1, repeat indefinitely).
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = replace_none(count, -1)
def parse(self, children=None):
return cde.RepeatNode(children[0], self.count)
class SkipDataset(Dataset):
"""
The result of applying Skip operator to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to have elements skipped.
count (int): Number of elements to be skipped in the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(input_dataset)
self.count = count
def parse(self, children=None):
return cde.SkipNode(children[0], self.count)
class TakeDataset(Dataset):
"""
The result of applying Take operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to have elements taken from.
count (int): Number of elements to be taken from the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = count
def parse(self, children=None):
return cde.TakeNode(children[0], self.count)
class ZipDataset(Dataset):
"""
The result of applying Zip operator to the input Dataset.
Args:
datasets (tuple): A tuple of datasets to be zipped together.
Raises:
TypeError: If dataset is not an instance of Dataset.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
def parse(self, children=None):
return cde.ZipNode(children)
def is_sync(self):
return any([c.is_sync() for c in self.children])
class ConcatDataset(Dataset):
"""
The result of applying concat dataset operator to the input Dataset.
Args:
datasets (list): A list of datasets to be concatenated together.
Raises:
TypeError: If dataset is not an instance of Dataset.
ValueError: If there are no samples in one of the datasets.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
self.datasets = datasets
self._sampler = samplers.SequentialSampler(num_samples=None)
self.children_sizes_ = [c.get_dataset_size() for c in self.children]
child_index = 0
for item in self.children_sizes_:
if item == 0:
raise ValueError("There are no samples in the dataset number %d. Please make sure there are "
"valid samples in the dataset." % child_index)
child_index += 1
# _children_flag_and_nums: A list of <int, int> pairs. The first element of each pair is a flag that indicates
# whether the dataset is mappable; the second element is the length of the dataset
self._children_flag_and_nums = []
# _children_start_end_index_: A list of <int, int> pairs. The elements of each pair characterize
# the valid position range of the dataset corresponding to that subscript when sampling
self._children_start_end_index_ = []
for index, child in enumerate(self.children):
tem_list = [-1, -1]
self._children_start_end_index_.append(tem_list)
dataset_len = self.children_sizes_[index]
if isinstance(child, GeneratorDataset) and not hasattr(child.source, "__getitem__"):
dataset_len = 0
self.children_sizes_[index] = 0
if isinstance(child, MappableDataset):
self._children_flag_and_nums.append((0, dataset_len))
else:
self._children_flag_and_nums.append((1, dataset_len))
def parse(self, children=None):
return cde.ConcatNode(children, self._sampler, self._children_flag_and_nums, self._children_start_end_index_)
def use_sampler(self, sampler):
"""
Set the DistributedSampler for the concatenated dataset.
Args:
sampler (Sampler): The sampler to use for the current dataset.
Currently supported: DistributedSampler.
Raises:
TypeError: If the sampler is not an instance of DistributedSampler.
ValueError: If the parameter shuffle of the sampler is True.
ValueError: If the parameter num_samples of the sampler is not None.
ValueError: If num_shards <= 0.
"""
if not isinstance(sampler, samplers.DistributedSampler):
raise TypeError("The parameter %s of concat must be DistributedSampler!" % sampler)
if sampler.is_shuffled():
raise ValueError("The parameter shuffle of DistributedSampler must be False!")
if sampler.num_shards <= 0:
raise ValueError("The parameter num_shards of DistributedSampler must be positive int!")
if sampler.get_num_samples() is not None:
raise ValueError("The parameter num_samples of DistributedSampler is not support to be set!")
self.dataset_size = None
self._sampler = sampler
cumulative_samples_nums = 0
for index, child in enumerate(self.children):
if hasattr(child, 'sampler') and child.sampler.get_num_samples() is not None:
raise ValueError("The parameter NumSamples of %s is not support to be set!" % child)
if isinstance(child, BatchDataset):
raise TypeError("The parameter %s of concat must not be BatchDataset!" % child)
# if child is mappable and the length is greater than 0
if not self._children_flag_and_nums[index][0] and self._children_flag_and_nums[index][1]:
tem_value = cumulative_samples_nums + self._children_flag_and_nums[index][1]
if not self._children_flag_and_nums[index][1] >= sampler.num_shards:
if tem_value < sampler.num_shards:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value
else:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value % sampler.num_shards
tem_sampler = copy.deepcopy(sampler)
tem_sampler.set_offset(cumulative_samples_nums)
child.use_sampler(tem_sampler)
cumulative_samples_nums += self.children_sizes_[index]
cumulative_samples_nums %= sampler.num_shards
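# A minimal, self-contained sketch (not part of the dataset API) of the offset bookkeeping that
# use_sampler above performs for each child: every child's DistributedSampler starts at the number
# of samples accumulated so far, taken modulo num_shards. The per-child sizes are made-up values,
# and the mappable/non-mappable distinction and start/end index bookkeeping are deliberately omitted.
def _example_concat_offsets(children_sizes, num_shards):
    """Illustrative only: offsets that each child's sampler would receive."""
    offsets = []
    cumulative = 0
    for size in children_sizes:
        offsets.append(cumulative)
        cumulative = (cumulative + size) % num_shards
    return offsets  # e.g. _example_concat_offsets([5, 7, 3], 4) -> [0, 1, 0]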
class RenameDataset(Dataset):
"""
The result of applying Rename operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be renamed.
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
"""
def __init__(self, input_dataset, input_columns, output_columns):
super().__init__(children=input_dataset)
self.input_column_names = to_list(input_columns)
self.output_column_names = to_list(output_columns)
def parse(self, children=None):
return cde.RenameNode(children[0], self.input_column_names, self.output_column_names)
def to_list(items):
if items is None:
return []
if isinstance(items, tuple):
return list(items)
if not isinstance(items, list):
return [items]
return items
class ProjectDataset(Dataset):
"""
The result of applying Project operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be projected.
columns (Union[str, list[str]]): List of names of the columns to project.
"""
def __init__(self, input_dataset, columns):
super().__init__(children=input_dataset)
self.columns = to_list(columns)
def parse(self, children=None):
return cde.ProjectNode(children[0], self.columns)
class _ToDevice:
"""
Internal class to handle sending data to device.
"""
def __init__(self, dataset, num_epochs):
ir_tree, self.api_tree = dataset.create_ir_tree()
self._runtime_context = cde.PythonRuntimeContext()
self._runtime_context.Init()
self._to_device = cde.ToDevice(num_epochs)
self._to_device.Init(ir_tree)
self._runtime_context.AssignConsumer(self._to_device)
ITERATORS_LIST.append(weakref.ref(self))
_unset_iterator_cleanup()
def send(self):
self._to_device.Send()
def stop_send(self):
"""
Send a stop-send signal to the pipeline; used when an end-of-sequence flag is sent at the end of an epoch.
"""
self._to_device.StopSend()
def continue_send(self):
"""
Send a continue-send signal to the pipeline; used when an end-of-sequence flag is sent at the end of an epoch.
"""
self._to_device.ContinueSend()
def get_data_info(self):
"""
Get type and shape of current batch.
"""
return self._to_device.GetDataInfo()
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if hasattr(self, '_runtime_context') and self._runtime_context:
if hasattr(self, '_to_device') and self._to_device:
self._runtime_context.Terminate()
del self._to_device
del self._runtime_context
def __deepcopy__(self, memodict):
return self
class TransferDataset(Dataset):
"""
The result of applying TDT operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be transferred.
send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not (default=False).
Raises:
TypeError: If device_type is empty.
ValueError: If device_type is not 'Ascend', 'GPU' or 'CPU'.
RuntimeError: If dataset is unknown.
"""
def __init__(self, input_dataset, send_epoch_end=True, create_data_info_queue=False):
super().__init__(children=input_dataset)
self.queue_name = str(uuid.uuid1())
self.device_type = context.get_context("device_target") if context else "CPU"
self.device_id = context.get_context("device_id") if context else 0
self._send_epoch_end = replace_none(send_epoch_end, True)
self._create_data_info_queue = create_data_info_queue
self._to_device = None
def parse(self, children=None):
total_batch = 0
if hasattr(self.children[0], "__total_batch__"):
total_batch = self.children[0].__total_batch__
return cde.TransferNode(children[0], self.queue_name, self.device_type, self.device_id, self._send_epoch_end,
total_batch, self._create_data_info_queue)
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
raise RuntimeError("TransferDataset is not iterable.")
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
raise RuntimeError("TransferDataset is not iterable.")
def __iter__(self):
raise RuntimeError("TransferDataset is not iterable.")
def output_shapes(self):
raise RuntimeError("TransferDataset does not support obtaining output_shapes.")
def output_types(self):
raise RuntimeError("TransferDataset does not support obtaining output_types.")
@check_to_device_send
def send(self, num_epochs=-1):
"""
Send to device
"""
if Dataset._noop_mode():
return
if self._to_device is not None:
del self._to_device
self._to_device = _ToDevice(self, num_epochs)
self._to_device.send()
def stop_send(self):
if self._to_device is not None:
self._to_device.stop_send()
def continue_send(self):
if self._to_device is not None:
self._to_device.continue_send()
def get_data_info(self):
"""
Get type and shape of current batch
"""
if self._to_device is not None:
return self._to_device.get_data_info()
raise RuntimeError("Calling get_data_info with bad state.")
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if self._to_device is not None:
self._to_device.release()
class RangeDataset(MappableDataset):
"""
A source dataset that generates a sequence of numbers over a given range.
Args:
start (int): Starting index.
stop (int): Ending index.
step (int): Step size in the range specified by start and stop.
"""
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def is_shuffled(self):
return False
def is_sharded(self):
return False
def get_dataset_size(self):
if self.dataset_size is None:
self.dataset_size = math.ceil((self.stop - self.start) / self.step)
return self.dataset_size
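# A quick, illustrative check (not part of the dataset API) of the size formula used above:
# a (start, stop, step) range yields ceil((stop - start) / step) elements. `math` is assumed to
# already be imported at module level, as it is used by get_dataset_size.
def _example_range_size(start, stop, step):
    """Illustrative only: mirrors RangeDataset.get_dataset_size."""
    return math.ceil((stop - start) / step)  # e.g. _example_range_size(0, 10, 3) -> 4 (0, 3, 6, 9)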
class FashionMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the FASHION-MNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read from 60,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> fashion_mnist_dataset_dir = "/path/to/fashion_mnist_dataset_directory"
>>>
>>> # Read 3 samples from FASHIONMNIST dataset
>>> dataset = ds.FashionMnistDataset(dataset_dir=fashion_mnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In FASHIONMNIST dataset, each dictionary has keys "image" and "label"
About Fashion-MNIST dataset:
Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and
a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes.
We intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking
machine learning algorithms. It shares the same image size and structure of training and testing splits.
Here is the original Fashion-MNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── fashionmnist_dataset_dir
├── t10k-images-idx3-ubyte
├── t10k-labels-idx1-ubyte
├── train-images-idx3-ubyte
└── train-labels-idx1-ubyte
Citation:
.. code-block::
@online{xiao2017/online,
author = {Han Xiao and Kashif Rasul and Roland Vollgraf},
title = {Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms},
date = {2017-08-28},
year = {2017},
eprintclass = {cs.LG},
eprinttype = {arXiv},
eprint = {cs.LG/1708.07747},
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.FashionMnistNode(self.dataset_dir, self.usage, self.sampler)
class ImageFolderDataset(MappableDataset):
"""
A source dataset that reads images from a tree of directories.
All images within one folder have the same label.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is of a scalar of uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
extensions (list[str], optional): List of file extensions to be
included in the dataset (default=None).
class_indexing (dict, optional): A str-to-int mapping from folder name to index
(default=None, the folder names will be sorted
alphabetically and each class will be given a
unique index starting from 0).
decode (bool, optional): Decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> image_folder_dataset_dir = "/path/to/image_folder_dataset_directory"
>>>
>>> # 1) Read all samples (image files) in image_folder_dataset_dir with 8 threads
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... num_parallel_workers=8)
>>>
>>> # 2) Read all samples (image files) from folder cat and folder dog with label 0 and 1
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... class_indexing={"cat":0, "dog":1})
>>>
>>> # 3) Read all samples (image files) in image_folder_dataset_dir with extensions .JPEG and .png (case sensitive)
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... extensions=[".JPEG", ".png"])
About ImageFolderDataset:
You can construct the following directory structure from your dataset files and read by MindSpore's API.
.. code-block::
.
└── image_folder_dataset_directory
├── class1
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── class2
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── class3
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── classN
├── ...
"""
@check_imagefolderdataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, sampler=None,
extensions=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.extensions = replace_none(extensions, [])
self.class_indexing = replace_none(class_indexing, {})
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.ImageFolderNode(self.dataset_dir, self.decode, self.sampler, self.extensions, self.class_indexing)
class MnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the MNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 60,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> mnist_dataset_dir = "/path/to/mnist_dataset_directory"
>>>
>>> # Read 3 samples from MNIST dataset
>>> dataset = ds.MnistDataset(dataset_dir=mnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In mnist_dataset dataset, each dictionary has keys "image" and "label"
About MNIST dataset:
The MNIST database of handwritten digits has a training set of 60,000 examples,
and a test set of 10,000 examples. It is a subset of a larger set available from
NIST. The digits have been size-normalized and centered in a fixed-size image.
Here is the original MNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── mnist_dataset_dir
├── t10k-images-idx3-ubyte
├── t10k-labels-idx1-ubyte
├── train-images-idx3-ubyte
└── train-labels-idx1-ubyte
Citation:
.. code-block::
@article{lecun2010mnist,
title = {MNIST handwritten digit database},
author = {LeCun, Yann and Cortes, Corinna and Burges, CJ},
journal = {ATT Labs [Online]},
volume = {2},
year = {2010},
howpublished = {http://yann.lecun.com/exdb/mnist}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.MnistNode(self.dataset_dir, self.usage, self.sampler)
class PhotoTourDataset(MappableDataset):
"""
A source dataset for reading and parsing the PhotoTour dataset.
The generated dataset with different usage has different output columns.
If train, the generated dataset has one column :py:obj:`[image]`,
else three columns :py:obj:`[image1, image2, matches]`.
The tensor of column :py:obj:`image`, :py:obj:`image1` and :py:obj:`image2` is of the uint8 type.
The tensor of column :py:obj:`matches` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
name (str): Name of the dataset to load,
should be one of 'notredame', 'yosemite', 'liberty', 'notredame_harris',
'yosemite_harris' or 'liberty_harris'.
usage (str, optional): Usage of the dataset, can be `train` or `test` (Default=None, will be set to 'train').
When usage is `train`, number of samples for each `name` is
{'notredame': 468159, 'yosemite': 633587, 'liberty': 450092, 'liberty_harris': 379587,
'yosemite_harris': 450912, 'notredame_harris': 325295}.
When usage is `test`, will read 100,000 samples for testing.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If usage is not in ["train", "test"].
ValueError: If name is not in ["notredame", "yosemite", "liberty",
"notredame_harris", "yosemite_harris", "liberty_harris"].
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive. The table
below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 64 64 1
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> # Read 3 samples from PhotoTour dataset.
>>> dataset = ds.PhotoTourDataset(dataset_dir="/path/to/photo_tour_dataset_directory",
... name='liberty', usage='train', num_samples=3)
>>>
>>> # In PhotoTourDataset dataset, if usage is 'train', each dictionary has key "image",
>>> # else has keys "image1" "image2" and "matches".
About PhotoTour dataset:
The data is taken from Photo Tourism reconstructions from Trevi Fountain (Rome), Notre Dame (Paris) and Half
Dome (Yosemite). Each dataset consists of a series of corresponding patches, which are obtained by projecting
3D points from Photo Tourism reconstructions back into the original images.
The dataset consists of 1024 x 1024 bitmap (.bmp) images, each containing a 16 x 16 array of image patches.
Each patch is sampled as 64 x 64 grayscale, with a canonical scale and orientation. For details of how the scale
and orientation is established, please see the paper. An associated metadata file info.txt contains the match
information. Each row of info.txt corresponds to a separate patch, with the patches ordered from left to right and
top to bottom in each bitmap image. The first number on each row of info.txt is the 3D point ID from which that
patch was sampled -- patches with the same 3D point ID are projected from the same 3D point (into different images).
The second number in info.txt corresponds to the image from which the patch was sampled, and is not used at present.
You can unzip the original PhotoTour dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── photo_tour_dataset_directory
├── liberty/
│ ├── info.txt // two columns: 3D_point_ID, unused
│ ├── m50_100000_100000_0.txt // seven columns: patch_ID1, 3D_point_ID1, unused1,
│ │ // patch_ID2, 3D_point_ID2, unused2, unused3
│ ├── patches0000.bmp // 1024*1024 pixels, with 16 * 16 patches.
│ ├── patches0001.bmp
│ ├── ...
├── yosemite/
│ ├── ...
├── notredame/
│ ├── ...
├── liberty_harris/
│ ├── ...
├── yosemite_harris/
│ ├── ...
├── notredame_harris/
│ ├── ...
Citation:
.. code-block::
@INPROCEEDINGS{4269996,
author={Winder, Simon A. J. and Brown, Matthew},
booktitle={2007 IEEE Conference on Computer Vision and Pattern Recognition},
title={Learning Local Image Descriptors},
year={2007},
volume={},
number={},
pages={1-8},
doi={10.1109/CVPR.2007.382971}
}
"""
@check_photo_tour_dataset
def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.name = name
self.usage = replace_none(usage, "train")
def parse(self, children=None):
return cde.PhotoTourNode(self.dataset_dir, self.name, self.usage, self.sampler)
class Places365Dataset(MappableDataset):
"""
A source dataset for reading and parsing the Places365 dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train-standard`, `train-challenge` or `val`
(default=None, will be set to 'train-standard').
small (bool, optional): Use 256 * 256 images (True) or high resolution images (False) (default=True).
decode (bool, optional): Decode the images after reading (default=False).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
ValueError: If usage is not in ["train-standard", "train-challenge", "val"].
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> place365_dataset_dir = "/path/to/place365_dataset_directory"
>>>
>>> # Read 3 samples from Places365 dataset
>>> dataset = ds.Places365Dataset(dataset_dir=place365_dataset_dir, usage='train-standard',
... small=True, decode=True, num_samples=3)
>>>
>>> # In places365 dataset, each dictionary has keys "image" and "label".
About Places365 dataset:
Convolutional neural networks (CNNs) trained on the Places2 Database can be used for scene recognition as well as
generic deep scene features for visual recognition.
The author releases the data of Places365-Standard and the data of Places365-Challenge to the public.
Places365-Standard is the core set of Places2 Database, which has been used to train the Places365-CNNs. The author
will add other kinds of annotation on the Places365-Standard in the future. Places365-Challenge is the competition
set of Places2 Database, which has 6.2 million extra images compared to the Places365-Standard.
The Places365-Challenge will be used for the Places Challenge 2016.
You can unzip the original Places365 dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
├── categories_places365.txt
├── places365_train-standard.txt
├── places365_train-challenge.txt
├── val_large/
│ ├── Places365_val_00000001.jpg
│ ├── Places365_val_00000002.jpg
│ ├── Places365_val_00000003.jpg
│ ├── ...
├── val_256/
│ ├── ...
├── data_large_standard/
│ ├── ...
├── data_256_standard/
│ ├── ...
├── data_large_challenge/
│ ├── ...
├── data_256_challenge/
│ ├── ...
Citation:
.. code-block::
@article{zhou2017places,
title={Places: A 10 million Image Database for Scene Recognition},
author={Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
year={2017},
publisher={IEEE}
}
"""
@check_places365_dataset
def __init__(self, dataset_dir, usage=None, small=True, decode=False, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = os.path.abspath(dataset_dir)
self.usage = replace_none(usage, "train-standard")
self.small = small
self.decode = decode
def parse(self, children=None):
return cde.Places365Node(self.dataset_dir, self.usage, self.small, self.decode, self.sampler)
class QMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the QMNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type when `compat` is True, otherwise a tensor of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test`, `test10k`, `test50k`, `nist`
or `all` (default=None, will read all samples).
compat (bool, optional): Whether the label for each example is class number (compat=True) or the full QMNIST
information (compat=False) (default=True).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> qmnist_dataset_dir = "/path/to/qmnist_dataset_directory"
>>>
>>> # Read 3 samples from QMNIST train dataset
>>> dataset = ds.QMnistDataset(dataset_dir=qmnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In QMNIST dataset, each dictionary has keys "image" and "label"
About QMNIST dataset:
The QMNIST dataset was generated from the original data found in the NIST Special Database 19 with the goal to
match the MNIST preprocessing as closely as possible.
Through an iterative process, researchers tried to generate an additional 50k images of MNIST-like data.
They started with a reconstruction process given in the paper and used the Hungarian algorithm to find the best
matches between the original MNIST samples and their reconstructed samples.
Here is the original QMNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── qmnist_dataset_dir
├── qmnist-train-images-idx3-ubyte
├── qmnist-train-labels-idx2-int
├── qmnist-test-images-idx3-ubyte
├── qmnist-test-labels-idx2-int
├── xnist-images-idx3-ubyte
└── xnist-labels-idx2-int
Citation:
.. code-block::
@incollection{qmnist-2019,
title = "Cold Case: The Lost MNIST Digits",
author = "Chhavi Yadav and L\'{e}on Bottou",\
booktitle = {Advances in Neural Information Processing Systems 32},
year = {2019},
publisher = {Curran Associates, Inc.},
}
"""
@check_qmnist_dataset
def __init__(self, dataset_dir, usage=None, compat=True, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
self.compat = compat
def parse(self, children=None):
return cde.QMnistNode(self.dataset_dir, self.usage, self.compat, self.sampler)
class MindDataset(MappableDataset):
"""
A source dataset for reading and parsing MindRecord dataset.
The columns of generated dataset depend on the source MindRecord files.
Args:
dataset_file (Union[str, list[str]]): If dataset_file is a str, it represents for
a file name of one component of a mindrecord source, other files with identical source
in the same path will be found and loaded automatically. If dataset_file is a list,
it represents for a list of dataset files to be read directly.
columns_list (list[str], optional): List of columns to be read (default=None).
num_parallel_workers (int, optional): The number of readers (default=None).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=None, performs global shuffle).
If shuffle is False, no shuffling will be performed.
If shuffle is True, the behavior is the same as setting shuffle to Shuffle.GLOBAL.
Otherwise, there are three levels of shuffling:
- Shuffle.GLOBAL: Global shuffle of all rows of data in dataset.
- Shuffle.FILES: Shuffle the file sequence but keep the order of data within each file.
- Shuffle.INFILE: Keep the file sequence the same but shuffle the data within each file.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, 'num_samples' reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, sampler is exclusive
with shuffle and block_reader). Supported samplers: SubsetRandomSampler,
PkSampler, RandomSampler, SequentialSampler, DistributedSampler.
padded_sample (dict, optional): Samples will be appended to the dataset, where
the keys are the same as columns_list.
num_padded (int, optional): Number of padding samples. Dataset size
plus num_padded should be divisible by num_shards.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all samples).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> mind_dataset_dir = ["/path/to/mind_dataset_file"] # contains 1 or multiple MindRecord files
>>> dataset = ds.MindDataset(dataset_file=mind_dataset_dir)
"""
def parse(self, children=None):
return cde.MindDataNode(self.dataset_file, self.columns_list, self.sampler, self.new_padded_sample,
self.num_padded, shuffle_to_shuffle_mode(self.shuffle_option))
@check_minddataset
def __init__(self, dataset_file, columns_list=None, num_parallel_workers=None, shuffle=None, num_shards=None,
shard_id=None, sampler=None, padded_sample=None, num_padded=None, num_samples=None, cache=None):
if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or "
"'Shuffle.FILES' or 'Shuffle.INFILE'.")
self.shuffle_option = shuffle
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle_to_bool(shuffle), num_shards=num_shards, shard_id=shard_id, cache=cache)
if isinstance(dataset_file, list):
self.load_dataset = False
else:
self.load_dataset = True
self.dataset_file = dataset_file
self.columns_list = replace_none(columns_list, [])
if shuffle is False:
logger.warning("WARN: global shuffle is not used.")
if sampler is not None:
if isinstance(sampler, (
samplers.SubsetRandomSampler, samplers.SubsetSampler, samplers.PKSampler,
samplers.DistributedSampler,
samplers.RandomSampler, samplers.SequentialSampler)) is False:
raise ValueError("The sampler is not supported yet.")
self.padded_sample = padded_sample
self.num_padded = replace_none(num_padded, 0)
self.new_padded_sample = {}
if padded_sample:
for k, v in padded_sample.items():
if isinstance(v, np.ndarray):
self.new_padded_sample[k] = v.tobytes()
else:
self.new_padded_sample[k] = v
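# A short sketch (not part of the dataset API) of the padded_sample normalization performed in
# MindDataset.__init__ above: ndarray values are serialized with tobytes() before being handed to
# the C++ layer, while other values are passed through unchanged. The column names and values
# below are hypothetical.
def _example_normalize_padded_sample():
    """Illustrative only: mirrors the construction of new_padded_sample."""
    padded_sample = {"image": np.zeros(4, dtype=np.uint8), "label": -1}
    new_padded_sample = {}
    for k, v in padded_sample.items():
        new_padded_sample[k] = v.tobytes() if isinstance(v, np.ndarray) else v
    return new_padded_sample  # {"image": b"\x00\x00\x00\x00", "label": -1}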
def _iter_fn(dataset, num_samples):
"""
Generator function wrapper for iterable dataset.
"""
if num_samples is not None and num_samples != 0:
ds_iter = iter(dataset)
for _ in range(num_samples):
try:
val = next(ds_iter)
except StopIteration:
return
# convert output tensors to ndarrays
yield _convert_row(val)
else:
for val in dataset:
# convert output tensors to ndarrays
yield _convert_row(val)
def _generator_fn(generator, num_samples):
"""
Generator function wrapper for generator function dataset.
"""
if num_samples is not None and num_samples != 0:
gen_iter = generator()
for _ in range(num_samples):
try:
val = next(gen_iter)
except StopIteration:
return
yield val
else:
gen_iter = generator()
for val in gen_iter:
yield val
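# A minimal sketch (not part of the dataset API) of how the num_samples cap in _generator_fn
# behaves: at most num_samples rows are drawn, and a shorter source simply ends early on
# StopIteration. The toy generator is illustrative and unrelated to any real dataset.
def _example_generator_fn_cap():
    """Illustrative only: cap a 3-element generator at 5 requested samples."""
    def toy_gen():
        for i in range(3):
            yield i
    return list(_generator_fn(toy_gen, 5))  # [0, 1, 2]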
def _cpp_sampler_fn(sample_ids, dataset):
"""
Generator function wrapper for mappable dataset with cpp sampler.
"""
if not isinstance(sample_ids, np.ndarray):
raise RuntimeError("Sample IDs are not in a numpy array.")
if sample_ids.size == 0:
raise RuntimeError("Sampler passed an empty sample IDs list.")
for i in sample_ids:
val = dataset[i]
# convert output tensors to ndarrays
yield _convert_row(val)
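# A minimal sketch (not part of the dataset API) of the cpp-sampler wrapper above: sample ids
# chosen by the C++ sampler are looked up in a random-accessible source and converted to numpy
# rows. The toy source and ids below are illustrative.
def _example_cpp_sampler_fn():
    """Illustrative only: draw rows 2 and 0 from a 3-row toy source."""
    toy_source = [(np.array([0]),), (np.array([10]),), (np.array([20]),)]
    ids = np.array([2, 0])
    return [int(row[0][0]) for row in _cpp_sampler_fn(ids, toy_source)]  # [20, 0]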
def _cpp_sampler_fn_mp(sample_ids, sample_fn):
"""
Multiprocessing generator function wrapper for mappable dataset with cpp sampler.
"""
if not isinstance(sample_ids, np.ndarray):
raise RuntimeError("Sample IDs are not in a numpy array.")
if sample_ids.size == 0:
raise RuntimeError("Sampler passed an empty sample IDs list.")
return sample_fn.process(sample_ids)
def _fill_worker_indices(workers, indices, idx):
"""
Worker index queue filler, fill worker index queue in round robin order.
"""
num_worker = len(workers)
while idx < len(indices):
try:
workers[idx % num_worker].put(indices[idx])
idx += 1
except queue.Full:
break
return idx
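# A small, self-contained sketch (not part of the dataset API) of the round-robin filling above.
# The _Slot class is a stand-in for a worker whose index queue raises queue.Full once it already
# holds one item; `queue` is assumed to be imported at module level, as it is elsewhere in this file.
def _example_fill_round_robin():
    """Illustrative only: distribute indices 0..4 across two single-slot workers."""
    class _Slot:
        def __init__(self):
            self._q = queue.Queue(1)
        def put(self, item):
            self._q.put_nowait(item)  # raises queue.Full when the slot is occupied
    workers = [_Slot(), _Slot()]
    return _fill_worker_indices(workers, list(range(5)), 0)  # 2: one index per worker fits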
def _check_shm_usage(num_worker, queue_size, max_rowsize, num_queues=1):
"""
Check sufficient shared memory is available for shared memory queues
when training in parallel mode.
"""
threshold_ratio = 0.8
if platform.system() != "Windows":
shm_estimate_usage = _get_device_num() * num_worker * num_queues * \
(queue_size + 2) * max_rowsize * 1024 * 1024
try:
shm_available = psutil.disk_usage('/dev/shm').free
if shm_estimate_usage >= threshold_ratio * shm_available:
raise RuntimeError(
"Insufficient shared memory available. Required: {}, Available: {}. "
"The required memory can't exceed 80% of the available shared memory. "
"Recommend to set_enable_shared_mem to False, reduce max_rowsize or reduce num_parallel_workers."
.format(shm_estimate_usage, shm_available))
except FileNotFoundError:
raise RuntimeError("Expected /dev/shm to exist.")
def _convert_row(row):
"""
Convert Op return value to numpy
"""
value = []
if isinstance(row, dict):
raise ValueError("Return value in user defined python function should be numpy array, but got dict.")
# convert each column in row into numpy array
for x in row:
if isinstance(x, bytes): # got image bytes from a file
value.append(np.frombuffer(x, np.uint8))
elif isinstance(x, Tensor): # got mindspore.Tensor
value.append(x.asnumpy())
elif isinstance(x, dict):
raise ValueError("Return value in user defined python function should be numpy array, but got dict.")
else:
value.append(np.array(x, copy=False))
return tuple(value)
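# A tiny sketch (not part of the dataset API) of the row conversion above: bytes become uint8
# buffers and plain Python scalars/lists become ndarrays. The values are arbitrary and the exact
# integer dtype is platform dependent.
def _example_convert_row():
    """Illustrative only: convert a (bytes, int, list) row into numpy arrays."""
    arrays = _convert_row((b"\x00\x01\x02", 7, [1.0, 2.0]))
    return [a.dtype for a in arrays]  # [uint8, platform int, float64]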
class SamplerFn:
"""
Master wrapper that drives multiprocessing or multithreaded generator workers.
"""
def __init__(self, dataset, num_worker, multi_process, max_rowsize):
self.workers = []
self.num_worker = num_worker
self.multi_process = multi_process
self.need_join = False
self.ppid = os.getpid()
self.pids = []
# Event for end of epoch
if multi_process is True:
try:
self.eof = multiprocessing.Event()
except Exception:
raise RuntimeError("Init multiprocessing.Event() failed, This might be caused by insufficient shm,"
+ " and the recommended shm size is at least 5 GB.")
else:
self.eof = threading.Event()
# Create workers
# Get the default queue size and shrink the per-worker queue size when there is a large number of workers.
queue_size = get_prefetch_size()
queue_size = min(queue_size, queue_size * 4 // num_worker)
queue_size = max(2, queue_size)
if multi_process and get_enable_shared_mem():
_check_shm_usage(num_worker, queue_size, max_rowsize)
for _ in range(num_worker):
if multi_process is True:
try:
worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size)
except Exception:
raise RuntimeError("Init multiprocessing.Queue() failed, This might be caused by insufficient shm,"
+ " and the recommended shm size is at least 5 GB.")
worker.daemon = True
# When multi processes fork a subprocess, the lock of the main process is copied to the subprocess,
# which may cause deadlock. Therefore, the subprocess startup is performed in the initialization phase.
# In this phase, the main process is not locked.
worker.start()
self.pids.append(worker.pid)
self.need_join = True
else:
worker = _GeneratorWorkerMt(dataset, self.eof)
worker.daemon = True
self.workers.append(worker)
if multi_process is True and platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))
self.watch_dog.daemon = True
self.watch_dog.start()
def process(self, indices):
"""
The main process, start the child process or child thread, and fill the index queue.
Get the result and return.
"""
for w in self.workers:
# Check whether the queue of the subprocess is empty.
if not w.queue_empty():
raise Exception("The queue of the subprocess is not empty.")
# Start all workers
if not w.is_alive():
w.start()
# Fill initial index queues
idx_cursor = 0
idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
# Fetch results
for i in range(len(indices)):
if self.eof.is_set():
self._stop_subprocess()
return
if self.multi_process is True and not psutil.pid_exists(self.workers[i % self.num_worker].pid):
self._stop_subprocess()
return
# Fetch result and put index
try:
result = self.workers[i % self.num_worker].get()
if isinstance(result, ExceptionHandler):
result.reraise()
except queue.Empty:
self._stop_subprocess()
raise Exception("Generator worker process timeout.")
except KeyboardInterrupt:
self._stop_subprocess()
raise Exception("Generator worker receives KeyboardInterrupt.")
if self.eof.is_set():
self._stop_subprocess()
return
if idx_cursor < len(indices):
idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
yield _convert_row(result)
def _stop_subprocess(self):
# Only the main process can call join
if self.need_join is True and self.ppid == os.getpid():
self.eof.set()
self.need_join = False
for w in self.workers:
if psutil.pid_exists(w.pid):
w.join()
self._abort_watchdog()
def _abort_watchdog(self):
if hasattr(self, 'eot') and self.eot is not None and not self.eot.is_set():
self.eot.set()
def __del__(self):
self._stop_subprocess()
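# A small sketch (not part of the dataset API) of the per-worker queue sizing used in
# SamplerFn.__init__ above: the default prefetch size is scaled down when many workers are used,
# but never drops below 2. The default of 16 is an assumed placeholder for get_prefetch_size().
def _example_worker_queue_size(num_worker, default_prefetch=16):
    """Illustrative only: mirrors the queue-size adjustment in SamplerFn.__init__."""
    queue_size = min(default_prefetch, default_prefetch * 4 // num_worker)
    return max(2, queue_size)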
def _subprocess_handle(eof, signum, frame):
threading.Thread(target=eof.set).start()
def _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiprocessing):
"""
Multithread or multiprocess generator worker process loop.
"""
if is_multiprocessing:
signal.signal(signal.SIGTERM, partial(_subprocess_handle, eof))
while True:
# Fetch index, block
try:
idx = idx_queue.get(timeout=1)
except KeyboardInterrupt:
if is_multiprocessing:
eof.set()
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
raise Exception("Generator worker receives KeyboardInterrupt.")
except queue.Empty:
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# If end-of-file (eof) is not set, continue to get data from idx_queue
continue
if idx is None:
# When the queue is out of scope from master process, a None item can be fetched from the queue.
# Upon receiving None, worker process should check if eof is set.
if not eof.is_set():
raise Exception("")
return
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# Fetch data, any exception from __getitem__ will terminate worker and timeout master process
try:
result = dataset[idx]
except Exception:
result = ExceptionHandler(where="in GeneratorDataset worker process")
# Send data, block
while True:
try:
result_queue.put(result, timeout=5)
except KeyboardInterrupt:
if is_multiprocessing:
eof.set()
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
raise Exception("Generator worker receives KeyboardInterrupt.")
except queue.Full:
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# If eof is not set, continue to put data to result_queue
continue
break
del result, idx
class _GeneratorWorkerMt(threading.Thread):
"""
Worker thread for multi-threaded Generator.
"""
def __init__(self, dataset, eof):
self.idx_queue = queue.Queue(16)
self.res_queue = queue.Queue(16)
super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, False))
def put(self, item):
"""
Put function for worker index queue. Never block. Raise queue.Full on failure.
"""
self.idx_queue.put_nowait(item)
def get(self):
"""
Get function for worker result queue. Block with timeout.
"""
return self.res_queue.get(timeout=30)
def queue_empty(self):
if not self.idx_queue.empty():
logger.warning("idx_queue is not empty")
return False
if not self.res_queue.empty():
logger.warning("res_queue is not empty")
return False
return True
class _GeneratorWorkerMp(multiprocessing.Process):
"""
Worker process for multiprocess Generator.
"""
def __init__(self, dataset, eof, max_rowsize, queue_size):
self.idx_queue = multiprocessing.Queue(queue_size)
if get_enable_shared_mem():
self.res_queue = _SharedQueue(queue_size, max_rowsize=max_rowsize)
else:
self.res_queue = multiprocessing.Queue(queue_size)
self.idx_queue._joincancelled = True # pylint: disable=W0212
self.res_queue._joincancelled = True # pylint: disable=W0212
super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, True))
def put(self, item):
"""
Put function for worker index queue. Never block. Raise queue.Full on failure.
"""
self.idx_queue.put_nowait(item)
def get(self):
"""
Get function for worker result queue. Block with timeout.
"""
# Relax 10s to 30s, since it sometimes will cause "Generator worker process timeout"
# when we run too many iterators with infinite epoch(num_epoch=-1)
return self.res_queue.get(timeout=30)
def queue_empty(self):
if not self.idx_queue.empty():
logger.warning("idx_queue is not empty.")
return False
if not self.res_queue.empty():
logger.warning("res_queue is not empty.")
return False
return True
class GeneratorDataset(MappableDataset):
"""
A source dataset that generates data from Python by invoking Python data source each epoch.
The column names and column types of generated dataset depend on Python data defined by users.
Args:
source (Union[Callable, Iterable, Random Accessible]):
A generator callable object, an iterable Python object or a random accessible Python object.
Callable source is required to return a tuple of NumPy arrays as a row of the dataset on source().next().
Iterable source is required to return a tuple of NumPy arrays as a row of the dataset on
iter(source).next().
Random accessible source is required to return a tuple of NumPy arrays as a row of the dataset on
source[idx].
column_names (Union[str, list[str]], optional): List of column names of the dataset (default=None). Users are
required to provide either column_names or schema.
column_types (list[mindspore.dtype], optional): List of column data types of the dataset (default=None).
If provided, sanity check will be performed on generator output.
schema (Union[Schema, str], optional): Path to the JSON schema file or schema object (default=None). Users are
required to provide either column_names or schema. If both are provided, schema will be used.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, `num_samples` reflects the maximum
sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker process. This
option could be beneficial if the Python operation is computational heavy (default=True).
max_rowsize (int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default 6 MB).
Raises:
RuntimeError: If source raises an exception during execution.
RuntimeError: If len of column_names does not match output len of source.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> import numpy as np
>>>
>>> # 1) Multidimensional generator function as callable input.
>>> def generator_multidimensional():
... for i in range(64):
... yield (np.array([[i, i + 1], [i + 2, i + 3]]),)
>>>
>>> dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["multi_dimensional_data"])
>>>
>>> # 2) Multi-column generator function as callable input.
>>> def generator_multi_column():
... for i in range(64):
... yield np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])
>>>
>>> dataset = ds.GeneratorDataset(source=generator_multi_column, column_names=["col1", "col2"])
>>>
>>> # 3) Iterable dataset as iterable input.
>>> class MyIterable:
... def __init__(self):
... self._index = 0
... self._data = np.random.sample((5, 2))
... self._label = np.random.sample((5, 1))
...
... def __next__(self):
... if self._index >= len(self._data):
... raise StopIteration
... else:
... item = (self._data[self._index], self._label[self._index])
... self._index += 1
... return item
...
... def __iter__(self):
... self._index = 0
... return self
...
... def __len__(self):
... return len(self._data)
>>>
>>> dataset = ds.GeneratorDataset(source=MyIterable(), column_names=["data", "label"])
>>>
>>> # 4) Random accessible dataset as random accessible input.
>>> class MyAccessible:
... def __init__(self):
... self._data = np.random.sample((5, 2))
... self._label = np.random.sample((5, 1))
...
... def __getitem__(self, index):
... return self._data[index], self._label[index]
...
... def __len__(self):
... return len(self._data)
>>>
>>> dataset = ds.GeneratorDataset(source=MyAccessible(), column_names=["data", "label"])
>>>
>>> # list, dict, tuple of Python is also random accessible
>>> dataset = ds.GeneratorDataset(source=[(np.array(0),), (np.array(1),), (np.array(2),)], column_names=["col"])
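>>>
>>> # 5) A hedged sketch (not part of the original examples): fetch from a random accessible source with
>>> # several worker processes; the worker count and max_rowsize values below are illustrative only.
>>> dataset = ds.GeneratorDataset(source=MyAccessible(), column_names=["data", "label"],
...                               num_parallel_workers=4, python_multiprocessing=True, max_rowsize=6)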
"""
@check_generatordataset
def __init__(self, source, column_names=None, column_types=None, schema=None, num_samples=None,
num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None,
python_multiprocessing=True, max_rowsize=6):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id)
self.source = source
self.prepared_source = None # source to be sent to C++
self.python_multiprocessing = python_multiprocessing
self.column_names = to_list(column_names)
if column_types is not None:
self.column_types = mstypelist_to_detypelist(column_types)
else:
self.column_types = []
self.schema = schema
if schema is not None and not isinstance(schema, Schema):
self.schema = Schema(schema)
# Get dataset_size via len here rather than in parse, because self.source will
# lose its '__len__' attribute after deepcopy.
self.source_len = -1 # unknown
if hasattr(self.source, "__len__"):
self.source_len = len(self.source)
self.max_rowsize = max_rowsize
self.sample_fn = None
def __deepcopy__(self, memodict):
if id(self) in memodict:
return memodict[id(self)]
new_op = self.__safe_deepcopy__(memodict, exclude=("source", "__transfer_dataset__"))
sample_fn = None
if new_op.sampler is not None and hasattr(self.source, "__getitem__"):
# A try/except is needed here because constructing the new op with shared memory enabled
# raises an exception when there is not enough shared memory available
if self.source_len == -1:
raise RuntimeError("Attempt to construct a random access dataset, '__len__' method is required!")
try:
if new_op.num_parallel_workers > 1:
# if num_parallel_workers is too large when python_multiprocessing=True, it may cause an OOM error
# get the num_shards
valid_num_shards = 1
if isinstance(self.sampler, samplers.DistributedSampler):
valid_num_shards = self.sampler.num_shards
elif self.num_shards is not None:
valid_num_shards = self.num_shards
# get process memory usage
process = psutil.Process(os.getpid())
process_memory = process.memory_info().rss
sys_memory = psutil.virtual_memory().total
total_memory_maybe_used = process_memory * (new_op.num_parallel_workers + 1) * valid_num_shards
if total_memory_maybe_used / sys_memory > 0.85:
valid_num_worker = math.floor(sys_memory * 0.85 / valid_num_shards / process_memory - 1)
valid_num_worker = 1 if valid_num_worker <= 0 else valid_num_worker
if total_memory_maybe_used / sys_memory > 1.0:
info = "GeneratorDataset num_parallel_workers: " + str(new_op.num_parallel_workers) + \
" is too large which maybe cause a lot of memory occupation (>100%) during multi " \
"process running. Therefore, it is recommended to reduce num_parallel_workers to " \
+ str(valid_num_worker) + " or smaller."
raise RuntimeError(info)
info = "GeneratorDataset num_parallel_workers: " + str(new_op.num_parallel_workers) + \
" is too large which maybe cause a lot of memory occupation (>85%) during multi " \
"process running. Therefore, it is recommended to reduce num_parallel_workers to " \
+ str(valid_num_worker) + " or smaller."
logger.warning(info)
sample_fn = SamplerFn(self.source, new_op.num_parallel_workers, self.python_multiprocessing,
self.max_rowsize)
new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn_mp(sample_ids, sample_fn))
else:
new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn(sample_ids, self.source))
new_op.sample_fn = sample_fn
except RuntimeError as e:
raise Exception(str(e))
else:
try:
new_op.sampler = None
new_op.sample_fn = sample_fn
new_op.source_len = min(new_op.source_len,
new_op.num_samples) if new_op.num_samples != 0 else new_op.source_len
iter(self.source)
except TypeError:
# Use generator function if input is callable
new_op.prepared_source = (lambda: _generator_fn(self.source, new_op.num_samples))
else:
# Use iterator function if input is iterable
# Random accessible input is also iterable
new_op.prepared_source = (lambda: _iter_fn(self.source, new_op.num_samples))
return new_op
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
def parse(self, children=None):
if self.schema is None:
return cde.GeneratorNode(self.prepared_source, self.column_names, self.column_types, self.source_len,
self.sampler, self.num_parallel_workers)
schema = self.schema
if isinstance(schema, Schema):
schema = self.schema.cpp_schema
return cde.GeneratorNode(self.prepared_source, schema, self.source_len, self.sampler,
self.num_parallel_workers)
class TFRecordDataset(SourceDataset):
"""
A source dataset for reading and parsing datasets stored on disk in TFData format.
The columns of generated dataset depend on the source TFRecord files.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a
pattern of files. The list will be sorted in a lexicographical order.
schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).
If the schema is not provided, the meta data from the TFData file is considered the schema.
columns_list (list[str], optional): List of columns to be read (default=None, read all columns).
num_samples (int, optional): The number of samples (rows) to be included in the dataset (default=None).
If num_samples is None and numRows(parsed from schema) does not exist, read the full dataset;
If num_samples is None and numRows(parsed from schema) is greater than 0, read numRows rows;
If both num_samples and numRows(parsed from schema) are greater than 0, read num_samples rows.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
shard_equal_rows (bool, optional): Get equal rows for all shards (default=False). If shard_equal_rows
is False, the number of rows in each shard may not be equal, which may lead to a failure in distributed training.
When the number of samples per TFRecord file is not equal, it is suggested to set this to True.
This argument should only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> from mindspore import dtype as mstype
>>>
>>> tfrecord_dataset_dir = ["/path/to/tfrecord_dataset_file"] # contains 1 or multiple TFRecord files
>>> tfrecord_schema_file = "/path/to/tfrecord_schema_file"
>>>
>>> # 1) Get all rows from tfrecord_dataset_dir with no explicit schema.
>>> # The meta-data in the first row will be used as a schema.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir)
>>>
>>> # 2) Get all rows from tfrecord_dataset_dir with user-defined schema.
>>> schema = ds.Schema()
>>> schema.add_column(name='col_1d', de_type=mstype.int64, shape=[2])
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=schema)
>>>
>>> # 3) Get all rows from tfrecord_dataset_dir with schema file.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=tfrecord_schema_file)
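>>>
>>> # 4) A hedged sketch (not part of the original examples): read shard 0 of 2 with equal rows per shard
>>> # and file-level shuffling only; the sharding values below are illustrative.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, num_shards=2, shard_id=0,
... shard_equal_rows=True, shuffle=ds.Shuffle.FILES)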
"""
@check_tfrecorddataset
def __init__(self, dataset_files, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.schema = schema
self.columns_list = replace_none(columns_list, [])
self.shard_equal_rows = replace_none(shard_equal_rows, False)
if self.schema is not None and (self.num_samples is None or self.num_samples == 0):
self.num_samples = Schema.get_num_rows(self.schema)
def parse(self, children=None):
schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema
return cde.TFRecordNode(self.dataset_files, schema, self.columns_list, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id, self.shard_equal_rows)
class ManifestDataset(MappableDataset):
"""
A source dataset for reading images from a Manifest file.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint64 type.
Args:
dataset_file (str): File to be read.
usage (str, optional): Acceptable usages include `train`, `eval` and `inference` (default= `train`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, will include all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
class_indexing (dict, optional): A str-to-int mapping from label name to index
(default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
decode (bool, optional): Decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_file is not valid or does not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> manifest_dataset_dir = "/path/to/manifest_dataset_file"
>>>
>>> # 1) Read all samples specified in manifest_dataset_dir dataset with 8 threads for training
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, usage="train", num_parallel_workers=8)
>>>
>>> # 2) Read samples (specified in manifest_file.manifest) for shard 0 in a 2-way distributed training setup
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, num_shards=2, shard_id=0)
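>>>
>>> # 3) A hedged sketch (not part of the original examples): supply an explicit label-to-index mapping;
>>> # the class names used here are illustrative only.
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, class_indexing={"cat": 0, "dog": 1})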
"""
@check_manifestdataset
def __init__(self, dataset_file, usage="train", num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_file = dataset_file
self.decode = replace_none(decode, False)
self.usage = replace_none(usage, "train")
self.class_indexing = replace_none(class_indexing, {})
def parse(self, children=None):
return cde.ManifestNode(self.dataset_file, self.usage, self.sampler, self.class_indexing, self.decode)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
Examples:
>>> manifest_dataset_dir = "/path/to/manifest_dataset_file"
>>>
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir)
>>> class_indexing = dataset.get_class_indexing()
"""
if self.class_indexing is None or not self.class_indexing:
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = runtime_getter[0].GetClassIndexing()
self.class_indexing = {}
for pair in self._class_indexing:
self.class_indexing[pair[0]] = pair[1][0]
return self.class_indexing
class Cifar10Dataset(MappableDataset):
"""
A source dataset for reading and parsing Cifar10 dataset.
Currently, this API only supports parsing CIFAR-10 files in the binary version.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read from 50,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples
(default=None, all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cifar10_dataset_dir = "/path/to/cifar10_dataset_directory"
>>>
>>> # 1) Get all samples from CIFAR10 dataset in sequence
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from CIFAR10 dataset
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from CIFAR10 dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_shards=2, shard_id=0)
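>>>
>>> # 4) A hedged sketch (not part of the original examples): choose samples with an explicit sampler
>>> # instead of the shuffle flag; the sample count below is illustrative.
>>> sampler = ds.RandomSampler(num_samples=64)
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, sampler=sampler)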
>>>
>>> # In CIFAR10 dataset, each dictionary has keys "image" and "label"
About CIFAR-10 dataset:
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
with 6000 images per class. There are 50000 training images and 10000 test images.
The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
Here is the original CIFAR-10 dataset structure.
You can unzip the dataset files into the following directory structure and read them with MindSpore's API.
.. code-block::
.
└── cifar-10-batches-bin
├── data_batch_1.bin
├── data_batch_2.bin
├── data_batch_3.bin
├── data_batch_4.bin
├── data_batch_5.bin
├── test_batch.bin
├── readme.html
└── batches.meta.txt
Citation:
.. code-block::
@techreport{Krizhevsky09,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009},
howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.Cifar10Node(self.dataset_dir, self.usage, self.sampler)
class Cifar100Dataset(MappableDataset):
"""
A source dataset for reading and parsing Cifar100 dataset.
The generated dataset has three columns :py:obj:`[image, coarse_label, fine_label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensors of columns :py:obj:`coarse_label` and :py:obj:`fine_label` are each a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read from 50,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples
(default=None, all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cifar100_dataset_dir = "/path/to/cifar100_dataset_directory"
>>>
>>> # 1) Get all samples from CIFAR100 dataset in sequence
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from CIFAR100 dataset
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, num_samples=350, shuffle=True)
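>>>
>>> # 3) A hedged sketch (not part of the original examples): read shard 0 in a 2-way distributed setup;
>>> # the sharding values below are illustrative.
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, num_shards=2, shard_id=0)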
>>>
>>> # In CIFAR100 dataset, each dictionary has 3 keys: "image", "fine_label" and "coarse_label"
About CIFAR-100 dataset:
This dataset is just like the CIFAR-10, except it has 100 classes containing 600 images
each. There are 500 training images and 100 testing images per class. The 100 classes in
the CIFAR-100 are grouped into 20 superclasses. Each image comes with a "fine" label (the
class to which it belongs) and a "coarse" label (the superclass to which it belongs).
Here is the original CIFAR-100 dataset structure.
You can unzip the dataset files into the following directory structure and read them with MindSpore's API.
.. code-block::
.
└── cifar-100-binary
├── train.bin
├── test.bin
├── fine_label_names.txt
└── coarse_label_names.txt
Citation:
.. code-block::
@techreport{Krizhevsky09,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009},
howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.Cifar100Node(self.dataset_dir, self.usage, self.sampler)
class RandomDataset(SourceDataset):
"""
A source dataset that generates random data.
Args:
total_rows (int, optional): Number of samples for the dataset to generate
(default=None, number of samples is random).
schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).
If the schema is not provided, the random dataset generates a random schema.
columns_list (list[str], optional): List of columns to be read (default=None, read all columns)
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
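Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # A hedged sketch (not part of the original docstring): generate 10 random rows that follow a
>>> # user-defined schema; the column name and shape below are illustrative only.
>>> schema = ds.Schema()
>>> schema.add_column(name='image', de_type=mstype.uint8, shape=[28, 28, 1])
>>> dataset = ds.RandomDataset(total_rows=10, schema=schema)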
"""
@check_random_dataset
def __init__(self, total_rows=None, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,
cache=None, shuffle=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.total_rows = total_rows
if schema is not None:
self.total_rows = replace_none(total_rows, Schema.get_num_rows(schema))
self.schema = schema
self.columns_list = replace_none(columns_list, [])
def parse(self, children=None):
schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema
return cde.RandomNode(self.total_rows, schema, self.columns_list)
class Schema:
"""
Class to represent a schema of a dataset.
Args:
schema_file (str): Path of the schema file (default=None).
Returns:
Schema object, schema info about dataset.
Raises:
RuntimeError: If schema file failed to load.
Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # Create schema; specify column name, mindspore.dtype and shape of the column
>>> schema = ds.Schema()
>>> schema.add_column(name='col1', de_type=mstype.int64, shape=[2])
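>>>
>>> # A hedged sketch (not part of the original example): the schema can then be serialized to a
>>> # JSON string with to_json(), e.g. for inspection or saving to a file.
>>> json_str = schema.to_json()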
"""
@check_schema
def __init__(self, schema_file=None):
self.schema_file = replace_none(schema_file, "")
self.cpp_schema = cde.SchemaObj(self.schema_file)
@check_add_column
def add_column(self, name, de_type, shape=None):
"""
Add new column to the schema.
Args:
name (str): The name of the new column.
de_type (str): Data type of the column.
shape (list[int], optional): Shape of the column
(default=None, [-1] which is an unknown shape of rank 1).
Raises:
ValueError: If column type is unknown.
"""
if isinstance(de_type, typing.Type):
de_type = mstype_to_detype(de_type)
col_type = str(de_type)
else:
col_type = str(cde.DataType(de_type))
if shape is None:
self.cpp_schema.add_column(name, col_type)
else:
self.cpp_schema.add_column(name, col_type, shape)
def parse_columns(self, columns):
"""
Parse the columns and add them to the schema.
Args:
columns (Union[dict, list[dict], tuple[dict]]): Dataset attribute information, decoded from schema file.
- list[dict], 'name' and 'type' must be in keys, 'shape' optional.
- dict, columns.keys() as name, columns.values() is dict, and 'type' inside, 'shape' optional.
Raises:
RuntimeError: If failed to parse columns.
RuntimeError: If column's name field is missing.
RuntimeError: If column's type field is missing.
Examples:
>>> schema = Schema()
>>> columns1 = [{'name': 'image', 'type': 'int8', 'shape': [3, 3]},
... {'name': 'label', 'type': 'int8', 'shape': [1]}]
>>> schema.parse_columns(columns1)
>>> columns2 = {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}}
>>> schema.parse_columns(columns2)
"""
self.cpp_schema.parse_columns(json.dumps(columns, indent=2))
def to_json(self):
"""
Get a JSON string of the schema.
Returns:
str, JSON string of the schema.
"""
return self.cpp_schema.to_json()
def from_json(self, json_obj):
"""
Get schema file from JSON object.
Args:
json_obj (dict): Parsed JSON object.
Raises:
RuntimeError: If there is an unknown item in the object.
RuntimeError: If the dataset type is missing in the object.
RuntimeError: If columns are missing in the object.
"""
self.cpp_schema.from_string(json.dumps(json_obj, indent=2))
def __str__(self):
return self.to_json()
@staticmethod
def get_num_rows(schema):
schema_obj = schema
if not isinstance(schema_obj, Schema):
schema_obj = Schema(schema_obj)
return schema_obj.cpp_schema.get_num_rows()
class USPSDataset(SourceDataset):
"""
A source dataset for reading and parsing the USPS dataset.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be "train", "test" or "all". "train" will read from 7,291
train samples, "test" will read from 2,007 test samples, "all" will read from all 9,298 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is not valid or does not exist or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> usps_dataset_dir = "/path/to/usps_dataset_directory"
>>>
>>> # Read 3 samples from USPS dataset
>>> dataset = ds.USPSDataset(dataset_dir=usps_dataset_dir, num_samples=3)
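>>>
>>> # A hedged sketch (not part of the original examples): read only the "train" split with
>>> # file-level shuffling; the values below are illustrative.
>>> dataset = ds.USPSDataset(dataset_dir=usps_dataset_dir, usage="train", shuffle=ds.Shuffle.FILES)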
>>>
>>> # Note: In USPS dataset, each dictionary has keys "image" and "label"
About USPS dataset:
USPS is a digit dataset automatically scanned from envelopes by the U.S. Postal Service
containing a total of 9,298 16×16 pixel grayscale samples.
The images are centered, normalized and show a broad range of font styles.
Here is the original USPS dataset structure.
You can download and unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── usps_dataset_dir
├── usps
├── usps.t
Citation:
.. code-block::
@article{hull1994database,
title={A database for handwritten text recognition research},
author={Hull, Jonathan J.},
journal={IEEE Transactions on pattern analysis and machine intelligence},
volume={16},
number={5},
pages={550--554},
year={1994},
publisher={IEEE}
}
"""
@check_usps_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.USPSNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class VOCDataset(MappableDataset):
"""
A source dataset for reading and parsing VOC dataset.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[label, dtype=uint32]`, :py:obj:`[difficult, dtype=uint32]`, :py:obj:`[truncate, dtype=uint32]`.
- task = :py:obj:`Segmentation`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[target,dtype=uint8]`.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str, optional): Set the task type of reading VOC data; only `Segmentation` and `Detection` are supported
(default= `Segmentation`).
usage (str, optional): Set the usage of ImageSets (default= `train`). If task is `Segmentation`, the image and
annotation list will be loaded in ./ImageSets/Segmentation/usage + ".txt"; if task is `Detection`, the image and
annotation list will be loaded in ./ImageSets/Main/usage + ".txt"; if task and usage are not set, the image and
annotation list will be loaded in ./ImageSets/Segmentation/train.txt by default.
class_indexing (dict, optional): A str-to-int mapping from label name to index, only valid in
`Detection` task (default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
extra_metadata (bool, optional): Flag to add extra meta-data to the row. If True, an additional column named
:py:obj:`[_meta-filename, dtype=string]` will be output at the end (default=False).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If the XML of Annotations is in an invalid format.
RuntimeError: If the XML of Annotations lacks the attribute `object`.
RuntimeError: If the XML of Annotations lacks the attribute `bndbox`.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If task is not 'Segmentation' or 'Detection'.
ValueError: If task is 'Segmentation' but class_indexing is not None.
ValueError: If the txt file related to the usage does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op
is added to remove the prefix('_meta-').
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> voc_dataset_dir = "/path/to/voc_dataset_directory"
>>>
>>> # 1) Read VOC data for segmentation training
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Segmentation", usage="train")
>>>
>>> # 2) Read VOC data for detection training
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train")
>>>
>>> # 3) Read all VOC dataset samples in voc_dataset_dir with 8 threads in random order
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
... num_parallel_workers=8)
>>>
>>> # 4) Read then decode all VOC dataset samples in voc_dataset_dir in sequence
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
... decode=True, shuffle=False)
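>>>
>>> # 5) A hedged sketch (not part of the original examples): supply a custom label-to-index mapping for
>>> # the Detection task; the class names below are illustrative only.
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
... class_indexing={"car": 0, "person": 1})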
>>>
>>> # In VOC dataset, if task='Segmentation', each dictionary has keys "image" and "target"
>>> # In VOC dataset, if task='Detection', each dictionary has keys "image" and "annotation"
About VOC dataset:
The PASCAL Visual Object Classes (VOC) challenge is a benchmark in visual
object category recognition and detection, providing the vision and machine
learning communities with a standard dataset of images and annotation, and
standard evaluation procedures.
You can unzip the original VOC-2012 dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── voc2012_dataset_dir
├── Annotations
│ ├── 2007_000027.xml
│ ├── 2007_000032.xml
│ ├── ...
├── ImageSets
│ ├── Action
│ ├── Layout
│ ├── Main
│ └── Segmentation
├── JPEGImages
│ ├── 2007_000027.jpg
│ ├── 2007_000032.jpg
│ ├── ...
├── SegmentationClass
│ ├── 2007_000032.png
│ ├── 2007_000033.png
│ ├── ...
└── SegmentationObject
├── 2007_000032.png
├── 2007_000033.png
├── ...
Citation:
.. code-block::
@article{Everingham10,
author = {Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.},
title = {The Pascal Visual Object Classes (VOC) Challenge},
journal = {International Journal of Computer Vision},
volume = {88},
year = {2012},
number = {2},
month = {jun},
pages = {303--338},
biburl = {http://host.robots.ox.ac.uk/pascal/VOC/pubs/everingham10.html#bibtex},
howpublished = {http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html}
}
"""
@check_vocdataset
def __init__(self, dataset_dir, task="Segmentation", usage="train", class_indexing=None, num_samples=None,
num_parallel_workers=None, shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None,
cache=None, extra_metadata=False):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.task = replace_none(task, "Segmentation")
self.usage = replace_none(usage, "train")
self.class_indexing = replace_none(class_indexing, {})
self.decode = replace_none(decode, False)
self.extra_metadata = extra_metadata
def parse(self, children=None):
return cde.VOCNode(self.dataset_dir, self.task, self.usage, self.class_indexing, self.decode, self.sampler,
self.extra_metadata)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
Examples:
>>> voc_dataset_dir = "/path/to/voc_dataset_directory"
>>>
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir)
>>> class_indexing = dataset.get_class_indexing()
"""
if self.task != "Detection":
raise NotImplementedError("Only the 'Detection' task supports get_class_indexing.")
if self.class_indexing is None or not self.class_indexing:
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = runtime_getter[0].GetClassIndexing()
self.class_indexing = {}
for pair in self._class_indexing:
self.class_indexing[pair[0]] = pair[1][0]
return self.class_indexing
class CocoDataset(MappableDataset):
"""
A source dataset for reading and parsing COCO dataset.
CocoDataset supports four kinds of tasks, which are Object Detection, Keypoint Detection, Stuff Segmentation and
Panoptic Segmentation of 2017 Train/Val/Test dataset.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`.
- task = :py:obj:`Stuff`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[segmentation,dtype=float32]`, \
:py:obj:`[iscrowd,dtype=uint32]`.
- task = :py:obj:`Keypoint`, output columns: :py:obj:`[image, dtype=uint8]`, \
:py:obj:`[keypoints, dtype=float32]`, :py:obj:`[num_keypoints, dtype=uint32]`.
- task = :py:obj:`Panoptic`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`, :py:obj:`[area, dtype=uint32]`.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
annotation_file (str): Path to the annotation JSON file.
task (str, optional): Set the task type for reading COCO data. Supported task types:
`Detection`, `Stuff`, `Panoptic` and `Keypoint` (default= `Detection`).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the configuration file).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
extra_metadata (bool, optional): Flag to add extra meta-data to the row. If True, an additional column named
:py:obj:`[_meta-filename, dtype=string]` will be output at the end (default=False).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If parsing the JSON file failed.
ValueError: If task is not in [`Detection`, `Stuff`, `Panoptic`, `Keypoint`].
ValueError: If annotation_file does not exist.
ValueError: If dataset_dir does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op is added
to remove the prefix('_meta-').
- CocoDataset doesn't support PKSampler.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> coco_dataset_dir = "/path/to/coco_dataset_directory/images"
>>> coco_annotation_file = "/path/to/coco_dataset_directory/annotation_file"
>>>
>>> # 1) Read COCO data for Detection task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Detection')
>>>
>>> # 2) Read COCO data for Stuff task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Stuff')
>>>
>>> # 3) Read COCO data for Panoptic task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Panoptic')
>>>
>>> # 4) Read COCO data for Keypoint task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Keypoint')
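>>>
>>> # 5) A hedged sketch (not part of the original examples): keep the extra meta-data column; note that a
>>> # later rename operation is needed to drop the '_meta-' prefix before the column is output.
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Detection', extra_metadata=True)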
>>>
>>> # In COCO dataset, each dictionary has keys "image" and "annotation"
About COCO dataset:
COCO(Microsoft Common Objects in Context) is a large-scale object detection, segmentation, and captioning dataset
with several features: Object segmentation, Recognition in context, Superpixel stuff segmentation,
330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories,
5 captions per image, 250,000 people with keypoints. In contrast to the popular ImageNet dataset, COCO has fewer
categories but more instances per category.
You can unzip the original COCO-2017 dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── coco_dataset_directory
├── train2017
│ ├── 000000000009.jpg
│ ├── 000000000025.jpg
│ ├── ...
├── test2017
│ ├── 000000000001.jpg
│ ├── 000000058136.jpg
│ ├── ...
├── val2017
│ ├── 000000000139.jpg
│ ├── 000000057027.jpg
│ ├── ...
└── annotations
├── captions_train2017.json
├── captions_val2017.json
├── instances_train2017.json
├── instances_val2017.json
├── person_keypoints_train2017.json
└── person_keypoints_val2017.json
Citation:
.. code-block::
@article{DBLP:journals/corr/LinMBHPRDZ14,
author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and
Lubomir D. Bourdev and Ross B. Girshick and James Hays and
Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
title = {Microsoft {COCO:} Common Objects in Context},
journal = {CoRR},
volume = {abs/1405.0312},
year = {2014},
url = {http://arxiv.org/abs/1405.0312},
archivePrefix = {arXiv},
eprint = {1405.0312},
timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
biburl = {https://dblp.org/rec/journals/corr/LinMBHPRDZ14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
@check_cocodataset
def __init__(self, dataset_dir, annotation_file, task="Detection", num_samples=None, num_parallel_workers=None,
shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None, cache=None,
extra_metadata=False):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.annotation_file = annotation_file
self.task = replace_none(task, "Detection")
self.decode = replace_none(decode, False)
self.extra_metadata = extra_metadata
def parse(self, children=None):
return cde.CocoNode(self.dataset_dir, self.annotation_file, self.task, self.decode, self.sampler,
self.extra_metadata)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-list<int> mapping from label name to index.
Examples:
>>> coco_dataset_dir = "/path/to/coco_dataset_directory/images"
>>> coco_annotation_file = "/path/to/coco_dataset_directory/annotation_file"
>>>
>>> # Read COCO data for Detection task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Detection')
>>>
>>> class_indexing = dataset.get_class_indexing()
"""
if self.task not in {"Detection", "Panoptic"}:
raise NotImplementedError("Only 'Detection' and 'Panoptic' support get_class_indexing.")
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = dict(runtime_getter[0].GetClassIndexing())
return self._class_indexing
class CelebADataset(MappableDataset):
"""
A source dataset for reading and parsing CelebA dataset.
Currently only supports reading `list_attr_celeba.txt`, which contains the attribute annotations of the dataset.
The generated dataset has two columns: :py:obj:`[image, attr]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`attr` is of the uint32 type and one-hot encoded.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_parallel_workers (int, optional): Number of workers to read the data (default=None, will use value set in
the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None).
usage (str, optional): Specify the `train`, `valid`, `test` part or `all` parts of dataset
(default= `all`, will read all samples).
sampler (Sampler, optional): Object used to choose samples from the dataset (default=None).
decode (bool, optional): Decode the images after reading (default=False).
extensions (list[str], optional): List of file extensions to be included in the dataset (default=None).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will include all images).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> celeba_dataset_dir = "/path/to/celeba_dataset_directory"
>>>
>>> # Read 5 samples from CelebA dataset
>>> dataset = ds.CelebADataset(dataset_dir=celeba_dataset_dir, usage='train', num_samples=5)
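>>>
>>> # A hedged sketch (not part of the original examples): read and decode the validation split;
>>> # the usage value is taken from the Args description above.
>>> dataset = ds.CelebADataset(dataset_dir=celeba_dataset_dir, usage='valid', decode=True)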
>>>
>>> # Note: In celeba dataset, each data dictionary owns keys "image" and "attr"
About CelebA dataset:
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset
with more than 200K celebrity images, each with 40 attribute annotations.
The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including
* 10,177 identities,
* 202,599 face images, and
* 5 landmark locations and 40 binary attribute annotations per image.
The dataset can be employed as the training and test sets for the following computer
vision tasks: face attribute recognition, face detection, landmark (or facial part)
localization, and face editing & synthesis.
Original CelebA dataset structure:
.. code-block::
.
└── CelebA
├── README.md
├── Img
│ ├── img_celeba.7z
│ ├── img_align_celeba_png.7z
│ └── img_align_celeba.zip
├── Eval
│ └── list_eval_partition.txt
└── Anno
├── list_landmarks_celeba.txt
├── list_landmarks_align_celeba.txt
├── list_bbox_celeba.txt
├── list_attr_celeba.txt
└── identity_CelebA.txt
You can unzip the dataset files into the following structure and read them with MindSpore's API.
.. code-block::
.
└── celeba_dataset_directory
├── list_attr_celeba.txt
├── 000001.jpg
├── 000002.jpg
├── 000003.jpg
├── ...
Citation:
.. code-block::
@article{DBLP:journals/corr/LiuLWT14,
author = {Ziwei Liu and Ping Luo and Xiaogang Wang and Xiaoou Tang},
title = {Deep Learning Face Attributes in the Wild},
journal = {CoRR},
volume = {abs/1411.7766},
year = {2014},
url = {http://arxiv.org/abs/1411.7766},
archivePrefix = {arXiv},
eprint = {1411.7766},
timestamp = {Tue, 10 Dec 2019 15:37:26 +0100},
biburl = {https://dblp.org/rec/journals/corr/LiuLWT14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org},
howpublished = {http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html}
}
"""
@check_celebadataset
def __init__(self, dataset_dir, num_parallel_workers=None, shuffle=None, usage='all', sampler=None, decode=False,
extensions=None, num_samples=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.decode = replace_none(decode, False)
self.extensions = replace_none(extensions, [])
self.usage = replace_none(usage, "all")
def parse(self, children=None):
if self.usage != "all":
dataset_dir = os.path.realpath(self.dataset_dir)
partition_file = os.path.join(dataset_dir, "list_eval_partition.txt")
if not os.path.exists(partition_file):
raise RuntimeError("Partition file cannot be found when usage is not 'all'.")
return cde.CelebANode(self.dataset_dir, self.usage, self.sampler, self.decode, self.extensions)
class CLUEDataset(SourceDataset):
"""
A source dataset that reads and parses CLUE datasets.
Supported CLUE classification tasks: `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`AFQMC`
- usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`TNEWS`
- usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- task = :py:obj:`IFLYTEK`
- usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=string]`, \
:py:obj:`[sentence, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.
- task = :py:obj:`CMNLI`
- usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`WSC`
- usage = :py:obj:`train`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \
:py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, :py:obj:`[text, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \
:py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`CSL`
- usage = :py:obj:`train`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for
a pattern of files. The list will be sorted in a lexicographical order.
task (str, optional): The kind of task, one of `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.
(default=AFQMC).
usage (str, optional): Specify the `train`, `test` or `eval` part of dataset (default="train").
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> clue_dataset_dir = ["/path/to/clue_dataset_file"] # contains 1 or multiple clue files
>>> dataset = ds.CLUEDataset(dataset_files=clue_dataset_dir, task='AFQMC', usage='train')
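>>> # A hypothetical sketch (file paths are placeholders): read the 'eval' part of the
>>> # TNEWS task and shuffle at file level only
>>> dataset = ds.CLUEDataset(dataset_files=clue_dataset_dir, task='TNEWS',
...                          usage='eval', shuffle=ds.Shuffle.FILES)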
About CLUE dataset:
CLUE, a Chinese Language Understanding Evaluation benchmark. It contains multiple
tasks, including single-sentence classification, sentence pair classification, and machine
reading comprehension.
You can unzip the dataset files into the following structure and read by MindSpore's API,
such as afqmc dataset:
.. code-block::
.
└── afqmc_public
├── train.json
├── test.json
└── dev.json
Citation:
.. code-block::
@article{CLUEbenchmark,
title = {CLUE: A Chinese Language Understanding Evaluation Benchmark},
author = {Liang Xu, Xuanwei Zhang, Lu Li, Hai Hu, Chenjie Cao, Weitang Liu, Junyi Li, Yudong Li,
Kai Sun, Yechen Xu, Yiming Cui, Cong Yu, Qianqian Dong, Yin Tian, Dian Yu, Bo Shi, Jun Zeng,
Rongzhao Wang, Weijian Xie, Yanting Li, Yina Patterson, Zuoyu Tian, Yiwen Zhang, He Zhou,
Shaoweihua Liu, Qipeng Zhao, Cong Yue, Xinrui Zhang, Zhengliang Yang, Zhenzhong Lan},
journal = {arXiv preprint arXiv:2004.05986},
year = {2020},
howpublished = {https://github.com/CLUEbenchmark/CLUE}
}
"""
@check_cluedataset
def __init__(self, dataset_files, task='AFQMC', usage='train', num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.usage = replace_none(usage, 'train')
self.task = replace_none(task, 'AFQMC')
def parse(self, children=None):
return cde.CLUENode(self.dataset_files, self.task, self.usage, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id)
class CSVDataset(SourceDataset):
"""
A source dataset that reads and parses comma-separated values (CSV) datasets.
The columns of generated dataset depend on the source CSV files.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search
for a pattern of files. The list will be sorted in a lexicographical order.
field_delim (str, optional): A string that indicates the char delimiter to separate fields (default=',').
column_defaults (list, optional): List of default values for the CSV fields (default=None). Each item
in the list is a default value of a valid type (float, int, or string). If this is not provided, all
columns are treated as the string type.
column_names (list[str], optional): List of column names of the dataset (default=None). If this
is not provided, infers the column_names from the first row of CSV file.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> csv_dataset_dir = ["/path/to/csv_dataset_file"] # contains 1 or multiple csv files
>>> dataset = ds.CSVDataset(dataset_files=csv_dataset_dir, column_names=['col1', 'col2', 'col3', 'col4'])
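>>> # A hypothetical sketch (file paths and defaults are placeholders): parse a
>>> # tab-separated file and give each column a typed default value
>>> dataset = ds.CSVDataset(dataset_files=csv_dataset_dir, field_delim='\t',
...                         column_defaults=[0, 0.0, "", ""],
...                         column_names=['col1', 'col2', 'col3', 'col4'])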
"""
@check_csvdataset
def __init__(self, dataset_files, field_delim=',', column_defaults=None, column_names=None, num_samples=None,
num_parallel_workers=None, shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.field_delim = replace_none(field_delim, ',')
self.column_defaults = replace_none(column_defaults, [])
self.column_names = replace_none(column_names, [])
def parse(self, children=None):
return cde.CSVNode(self.dataset_files, self.field_delim, self.column_defaults, self.column_names,
self.num_samples, self.shuffle_flag, self.num_shards, self.shard_id)
class SBUDataset(MappableDataset):
"""
A source dataset for reading and parsing the SBU dataset.
The generated dataset has two columns :py:obj:`[image, caption]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`caption` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
decode (bool, optional): Decode the images after reading (default=False).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> sbu_dataset_dir = "/path/to/sbu_dataset_directory"
>>> # Read 3 samples from SBU dataset
>>> dataset = ds.SBUDataset(dataset_dir=sbu_dataset_dir, num_samples=3)
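>>> # A hypothetical sketch: decode the images while reading and keep shard 0 of a 2-way split
>>> dataset = ds.SBUDataset(dataset_dir=sbu_dataset_dir, decode=True,
...                         num_shards=2, shard_id=0)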
About SBU dataset:
SBU dataset is a large captioned photo collection.
It contains one million images with associated visually relevant captions.
You should manually download the images using official download.m by replacing 'urls{i}(24, end)' with
'urls{i}(24:1:end)' and keep the directory as below.
.. code-block::
.
└─ dataset_dir
├── SBU_captioned_photo_dataset_captions.txt
├── SBU_captioned_photo_dataset_urls.txt
└── sbu_images
├── m_3326_3596303505_3ce4c20529.jpg
├── ......
└── m_2522_4182181099_c3c23ab1cc.jpg
Citation:
.. code-block::
@inproceedings{Ordonez:2011:im2text,
Author = {Vicente Ordonez and Girish Kulkarni and Tamara L. Berg},
Title = {Im2Text: Describing Images Using 1 Million Captioned Photographs},
Booktitle = {Neural Information Processing Systems ({NIPS})},
Year = {2011},
}
"""
@check_sbu_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, decode=False,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.SBUNode(self.dataset_dir, self.decode, self.sampler)
class _Flowers102Dataset:
"""
Mainly for loading the Flowers102 dataset, returning one row at a time.
"""
def __init__(self, dataset_dir, task, usage, decode):
self.dataset_dir = os.path.realpath(dataset_dir)
self.task = task
self.usage = usage
self.decode = decode
if self.task == "Classification":
self.column_names = ["image", "label"]
else:
self.column_names = ["image", "segmentation", "label"]
labels_path = os.path.join(self.dataset_dir, "imagelabels.mat")
setid_path = os.path.join(self.dataset_dir, "setid.mat")
# minus one to transform 1~102 to 0 ~ 101
self.labels = (loadmat(labels_path)["labels"][0] - 1).astype(np.uint32)
self.setid = loadmat(setid_path)
if self.usage == 'train':
self.indices = self.setid["trnid"][0].tolist()
elif self.usage == 'test':
self.indices = self.setid["tstid"][0].tolist()
elif self.usage == 'valid':
self.indices = self.setid["valid"][0].tolist()
elif self.usage == 'all':
self.indices = self.setid["trnid"][0].tolist()
self.indices += self.setid["tstid"][0].tolist()
self.indices += self.setid["valid"][0].tolist()
else:
raise ValueError("Input usage is not within the valid set of ['train', 'valid', 'test', 'all'].")
def __getitem__(self, index):
# range: 1 ~ 8189
image_path = os.path.join(self.dataset_dir, "jpg", "image_" + str(self.indices[index]).zfill(5) + ".jpg")
if not os.path.exists(image_path):
raise RuntimeError("Can not find image file: " + image_path)
if self.decode is True:
image = np.asarray(Image.open(image_path).convert("RGB"))
else:
image = np.fromfile(image_path, dtype=np.uint8)
label = self.labels[self.indices[index] - 1]
if self.task == "Segmentation":
segmentation_path = \
os.path.join(self.dataset_dir, "segmim", "segmim_" + str(self.indices[index]).zfill(5) + ".jpg")
if not os.path.exists(segmentation_path):
raise RuntimeError("Can not find segmentation file: " + segmentation_path)
if self.decode is True:
segmentation = np.asarray(Image.open(segmentation_path).convert("RGB"))
else:
segmentation = np.fromfile(segmentation_path, dtype=np.uint8)
return image, segmentation, label
return image, label
def __len__(self):
return len(self.indices)
class Flowers102Dataset(GeneratorDataset):
"""
A source dataset for reading and parsing Flowers102 dataset.
The generated dataset has two columns :py:obj:`[image, label]` or three :py:obj:`[image, segmentation, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`segmentation` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar or a tensor of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str): Specify the 'Classification' or 'Segmentation' task (default='Classification').
usage (str): Specify the 'train', 'valid', 'test' part or 'all' parts of dataset
(default='all', will read all samples).
num_samples (int, optional): The number of samples to be included in the dataset (default=None, all images).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
decode (bool, optional): Whether or not to decode the images and segmentations after reading (default=False).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, 'num_samples' reflects the max
sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> flowers102_dataset_dir = "/path/to/flowers102_dataset_directory"
>>> dataset = ds.Flowers102Dataset(dataset_dir=flowers102_dataset_dir,
... task="Classification",
... usage="all",
... decode=True)
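>>> # A hypothetical sketch: read the 'train' part for the Segmentation task,
>>> # which yields the three columns [image, segmentation, label]
>>> dataset = ds.Flowers102Dataset(dataset_dir=flowers102_dataset_dir,
...                                task="Segmentation", usage="train")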
About Flowers102 dataset:
Flowers102 dataset consists of 102 flower categories.
The flowers commonly occur in the United Kingdom.
Each class consists of between 40 and 258 images.
Here is the original Flowers102 dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── flowers102_dataset_dir
├── imagelabels.mat
├── setid.mat
├── jpg
├── image_00001.jpg
├── image_00002.jpg
├── ...
├── segmim
├── segmim_00001.jpg
├── segmim_00002.jpg
├── ...
Citation:
.. code-block::
@InProceedings{Nilsback08,
author = "Maria-Elena Nilsback and Andrew Zisserman",
title = "Automated Flower Classification over a Large Number of Classes",
booktitle = "Indian Conference on Computer Vision, Graphics and Image Processing",
month = "Dec",
year = "2008",
}
"""
@check_flowers102dataset
def __init__(self, dataset_dir, task="Classification", usage="all", num_samples=None, num_parallel_workers=1,
shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None):
self.dataset_dir = os.path.realpath(dataset_dir)
self.task = replace_none(task, "Classification")
self.usage = replace_none(usage, "all")
self.decode = replace_none(decode, False)
dataset = _Flowers102Dataset(self.dataset_dir, self.task, self.usage, self.decode)
super().__init__(dataset, column_names=dataset.column_names, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
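Examples:
>>> # A hypothetical sketch, assuming `dataset` is the Flowers102Dataset built above:
>>> # look up the numeric index assigned to one class name
>>> class_indexing = dataset.get_class_indexing()
>>> rose_index = class_indexing["rose"]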
"""
class_names = [
"pink primrose", "hard-leaved pocket orchid", "canterbury bells",
"sweet pea", "english marigold", "tiger lily", "moon orchid",
"bird of paradise", "monkshood", "globe thistle", "snapdragon",
"colt's foot", "king protea", "spear thistle", "yellow iris",
"globe-flower", "purple coneflower", "peruvian lily", "balloon flower",
"giant white arum lily", "fire lily", "pincushion flower", "fritillary",
"red ginger", "grape hyacinth", "corn poppy", "prince of wales feathers",
"stemless gentian", "artichoke", "sweet william", "carnation",
"garden phlox", "love in the mist", "mexican aster", "alpine sea holly",
"ruby-lipped cattleya", "cape flower", "great masterwort", "siam tulip",
"lenten rose", "barbeton daisy", "daffodil", "sword lily", "poinsettia",
"bolero deep blue", "wallflower", "marigold", "buttercup", "oxeye daisy",
"common dandelion", "petunia", "wild pansy", "primula", "sunflower",
"pelargonium", "bishop of llandaff", "gaura", "geranium", "orange dahlia",
"pink-yellow dahlia?", "cautleya spicata", "japanese anemone",
"black-eyed susan", "silverbush", "californian poppy", "osteospermum",
"spring crocus", "bearded iris", "windflower", "tree poppy", "gazania",
"azalea", "water lily", "rose", "thorn apple", "morning glory",
"passion flower", "lotus", "toad lily", "anthurium", "frangipani",
"clematis", "hibiscus", "columbine", "desert-rose", "tree mallow",
"magnolia", "cyclamen", "watercress", "canna lily", "hippeastrum",
"bee balm", "ball moss", "foxglove", "bougainvillea", "camellia", "mallow",
"mexican petunia", "bromelia", "blanket flower", "trumpet creeper",
"blackberry lily"
]
class_dict = {}
for i, class_name in enumerate(class_names):
class_dict[class_name] = i
return class_dict
class TextFileDataset(SourceDataset):
"""
A source dataset that reads and parses datasets stored on disk in text format.
The generated dataset has one column :py:obj:`[text]` with type string.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a
pattern of files. The list will be sorted in a lexicographical order.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> text_file_dataset_dir = ["/path/to/text_file_dataset_file"] # contains 1 or multiple text files
>>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir)
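>>> # A hypothetical sketch: take the first 100 lines in file order, without shuffling
>>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir,
...                              num_samples=100, shuffle=False)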
"""
@check_textfiledataset
def __init__(self, dataset_files, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
def parse(self, children=None):
return cde.TextFileNode(self.dataset_files, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class _NumpySlicesDataset:
"""
Mainly for handling several kinds of Python data formats, returning one row at a time.
"""
def __init__(self, data, column_list=None):
self.column_list = None
# Convert dict data into tuple
if isinstance(data, dict):
data = self.process_dict(data)
if isinstance(data, tuple):
self.data = ()
data_len = len(data)
for i in range(data_len):
self.data = self.data + (np.array(data[i]),)
else:
self.data = (np.array(data),)
# check whether the data length in each column is equal
data_len = [len(data_item) for data_item in self.data]
if data_len[1:] != data_len[:-1]:
raise ValueError("Data length in each column is not equal.")
# Init column_name
if column_list is not None:
self.column_list = column_list
elif self.column_list is None:
self.column_list = []
column_num = len(self.data)
for i in range(column_num):
self.column_list.append("column_" + str(i))
def __getitem__(self, index):
data_row = [d[index, ...] for d in self.data]
data_res = tuple(data_row)
return data_res
def __len__(self):
return len(self.data[0])
def process_dict(self, input_data):
"""
Convert dict-like data into tuple format. When the input is a tuple of dicts, compose it into a single dict first.
"""
# Convert pandas-like dict (columns exposing a "values" attribute) into a plain dict
data_keys = list(input_data.keys())
data_col = input_data[data_keys[0]]
if hasattr(data_col, "values"):
new_dict = {}
for key in data_keys:
item1 = input_data.pop(key)
new_dict[key] = item1.values
input_data = new_dict
# Convert the data in dict into tuple
data = ()
keys = list(input_data.keys())
self.column_list = keys
for key in keys:
value = input_data[key]
data = data + (list(value),)
return data
class NumpySlicesDataset(GeneratorDataset):
"""
Creates a dataset with given data slices, mainly for loading Python data into a dataset.
The column names and column types of the generated dataset depend on the Python data defined by the user.
Args:
data (Union[list, tuple, dict]): Input of given data. Supported data types include: list, tuple, dict and other
NumPy formats. Input data will be sliced along the first dimension and generate additional rows. If the input is
a list, there will be one column in each row; otherwise there tend to be multiple columns. Loading large amounts
of data this way is not recommended, since the data will be loaded into memory.
column_names (list[str], optional): List of column names of the dataset (default=None). If column_names is not
provided, the output column names will be named as the keys of dict when the input data is a dict,
otherwise they will be named like column_0, column_1 ...
num_samples (int, optional): The number of samples to be included in the dataset (default=None, all samples).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, `num_samples` reflects the max
sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Raises:
RuntimeError: If the length of column_names does not match the output length of data.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> # 1) Input data can be a list
>>> data = [1, 2, 3]
>>> dataset = ds.NumpySlicesDataset(data=data, column_names=["column_1"])
>>>
>>> # 2) Input data can be a dictionary, and column_names will be its keys
>>> data = {"a": [1, 2], "b": [3, 4]}
>>> dataset = ds.NumpySlicesDataset(data=data)
>>>
>>> # 3) Input data can be a tuple of lists (or NumPy arrays), each tuple element refers to data in each column
>>> data = ([1, 2], [3, 4], [5, 6])
>>> dataset = ds.NumpySlicesDataset(data=data, column_names=["column_1", "column_2", "column_3"])
>>>
>>> # 4) Load data from CSV file
>>> import pandas as pd
>>> df = pd.read_csv(filepath_or_buffer=csv_dataset_dir[0])
>>> dataset = ds.NumpySlicesDataset(data=dict(df), shuffle=False)
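>>>
>>> # 5) A hypothetical sketch: slice two NumPy arrays along their first dimension
>>> import numpy as np
>>> features = np.random.sample((5, 2))
>>> labels = np.random.sample((5, 1))
>>> dataset = ds.NumpySlicesDataset(data=(features, labels),
...                                 column_names=["feature", "label"], shuffle=False)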
"""
@check_numpyslicesdataset
def __init__(self, data, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None,
num_shards=None, shard_id=None):
dataset = _NumpySlicesDataset(data, column_names)
super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
class _PaddedDataset:
"""
Mainly for combining filler samples provided by users into a dataset.
Args:
padded_samples (list(dict)): Data provided by user to be added to the initial Dataset.
"""
def __init__(self, padded_samples):
self.column_names = list(padded_samples[0].keys())
self.padded_samples = padded_samples
def __getitem__(self, item):
return (self.padded_samples[item][key] for key in self.column_names)
def __len__(self):
return len(self.padded_samples)
class PaddedDataset(GeneratorDataset):
"""
Creates a dataset with filler data provided by the user. Mainly used to append filler samples to the original
dataset so that it can be assigned to the corresponding shard.
Args:
padded_samples (list(dict)): Samples provided by user.
Raises:
TypeError: If padded_samples is not an instance of list.
TypeError: If the element of padded_samples is not an instance of dict.
ValueError: If the padded_samples is empty.
Examples:
>>> import numpy as np
>>> data = [{'image': np.zeros(1, np.uint8)}, {'image': np.zeros(2, np.uint8)}]
>>> dataset = ds.PaddedDataset(padded_samples=data)
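>>> # A hypothetical sketch: concatenate one more filler sample onto the dataset built
>>> # above (assuming the '+' operator concatenates datasets that share the same columns)
>>> extra = ds.PaddedDataset(padded_samples=[{'image': np.zeros(3, np.uint8)}])
>>> dataset = dataset + extra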
"""
@check_paddeddataset
def __init__(self, padded_samples):
dataset = _PaddedDataset(padded_samples)
super().__init__(dataset, column_names=dataset.column_names, num_shards=None, shard_id=None, shuffle=False)
self._dataset_size = len(dataset.padded_samples)
self.padded_samples = padded_samples
class EMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the EMNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
name (str): Name of splits for this dataset, can be "byclass", "bymerge", "balanced", "letters", "digits"
or "mnist".
usage (str, optional): Usage of this dataset, can be "train", "test" or "all".
(default=None, will read all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> emnist_dataset_dir = "/path/to/emnist_dataset_directory"
>>>
>>> # Read 3 samples from EMNIST dataset
>>> dataset = ds.EMnistDataset(dataset_dir=emnist_dataset_dir, name="mnist", num_samples=3)
>>>
>>> # Note: In emnist_dataset dataset, each dictionary has keys "image" and "label"
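>>>
>>> # A hypothetical sketch: read only the training part of the 'letters' split
>>> dataset = ds.EMnistDataset(dataset_dir=emnist_dataset_dir, name="letters", usage="train")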
About EMNIST dataset:
The EMNIST dataset is a set of handwritten character digits derived from the NIST Special
Database 19 and converted to a 28x28 pixel image format and dataset structure that directly
matches the MNIST dataset. Further information on the dataset contents and conversion process
can be found in the paper available at https://arxiv.org/abs/1702.05373v1.
The numbers of characters and classes of each split of EMNIST are as follows:
By Class: 814,255 characters and 62 unbalanced classes.
By Merge: 814,255 characters and 47 unbalanced classes.
Balanced: 131,600 characters and 47 balanced classes.
Letters: 145,600 characters and 26 balanced classes.
Digits: 280,000 characters and 10 balanced classes.
MNIST: 70,000 characters and 10 balanced classes.
Here is the original EMNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── mnist_dataset_dir
├── emnist-mnist-train-images-idx3-ubyte
├── emnist-mnist-train-labels-idx1-ubyte
├── emnist-mnist-test-images-idx3-ubyte
├── emnist-mnist-test-labels-idx1-ubyte
├── ...
Citation:
.. code-block::
@article{cohen_afshar_tapson_schaik_2017,
title = {EMNIST: Extending MNIST to handwritten letters},
DOI = {10.1109/ijcnn.2017.7966217},
journal = {2017 International Joint Conference on Neural Networks (IJCNN)},
author = {Cohen, Gregory and Afshar, Saeed and Tapson, Jonathan and Schaik, Andre Van},
year = {2017},
howpublished = {https://www.westernsydney.edu.au/icns/reproducible_research/
publication_support_materials/emnist}
}
"""
@check_emnist_dataset
def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.name = name
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.EMnistNode(self.dataset_dir, self.name, self.usage, self.sampler)
class FakeImageDataset(MappableDataset):
"""
A source dataset for generating fake images.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
num_images (int, optional): Number of images to generate in the dataset (default=1000).
image_size (tuple, optional): Size of the fake image (default=(224, 224, 3)).
num_classes (int, optional): Number of classes in the dataset (default=10).
base_seed (int, optional): Offsets the index-based random seed used to generate each image (default=0).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> # Read 3 samples from FakeImage dataset
>>> dataset = ds.FakeImageDataset(num_images=1000, image_size=(224,224,3),
... num_classes=10, base_seed=0, num_samples=3)
>>>
>>> # Note: In FakeImage dataset, each dictionary has keys "image" and "label"
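>>>
>>> # A hypothetical sketch (all values are placeholders): generate a smaller 32x32
>>> # dataset and keep shard 0 of a 4-way split
>>> dataset = ds.FakeImageDataset(num_images=100, image_size=(32, 32, 3),
...                               num_classes=2, num_shards=4, shard_id=0)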
"""
@check_fake_image_dataset
def __init__(self, num_images=1000, image_size=(224, 224, 3), num_classes=10, base_seed=0, num_samples=None,
num_parallel_workers=None, shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.num_images = num_images
self.image_size = image_size
self.num_classes = num_classes
self.base_seed = base_seed
def parse(self, children=None):
return cde.FakeImageNode(self.num_images, self.image_size, self.num_classes, self.base_seed, self.sampler)
class FlickrDataset(MappableDataset):
"""
A source dataset for reading and parsing Flickr8k and Flickr30k dataset.
The generated dataset has two columns :py:obj:`[image, annotation]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`annotation` is a tensor which contains 5 annotation strings,
such as ["a", "b", "c", "d", "e"].
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
annotation_file (str): Path to the root directory that contains the annotation.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is not valid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If annotation_file does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> flickr_dataset_dir = "/path/to/flickr_dataset_directory"
>>> annotation_file = "/path/to/flickr_annotation_file"
>>>
>>> # 1) Get all samples from FLICKR dataset in sequence
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from FLICKR dataset
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... num_samples=350,
... shuffle=True)
>>>
>>> # 3) Get samples from FLICKR dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... num_shards=2,
... shard_id=0)
>>>
>>> # In FLICKR dataset, each dictionary has keys "image" and "annotation"
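>>>
>>> # 4) A hypothetical sketch: decode the images while reading
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
...                            annotation_file=annotation_file,
...                            decode=True)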
About Flickr8k dataset:
The Flickr8k dataset consists of 8092 colour images. There are 40460 annotations in the Flickr8k.token.txt;
each image has 5 annotations.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── Flickr8k
├── Flickr8k_Dataset
│ ├── 1000268201_693b08cb0e.jpg
│ ├── 1001773457_577c3a7d70.jpg
│ ├── ...
└── Flickr8k.token.txt
Citation:
.. code-block::
@article{DBLP:journals/jair/HodoshYH13,
author = {Micah Hodosh and Peter Young and Julia Hockenmaier},
title = {Framing Image Description as a Ranking Task: Data, Models and Evaluation Metrics},
journal = {J. Artif. Intell. Res.},
volume = {47},
pages = {853--899},
year = {2013},
url = {https://doi.org/10.1613/jair.3994},
doi = {10.1613/jair.3994},
timestamp = {Mon, 21 Jan 2019 15:01:17 +0100},
biburl = {https://dblp.org/rec/journals/jair/HodoshYH13.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
About Flickr30k dataset:
The Flickr30k dataset consists of 31783 colour images. There are 158915 annotations in
the results_20130124.token; each image has 5 annotations.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── Flickr30k
├── flickr30k-images
│ ├── 1000092795.jpg
│ ├── 10002456.jpg
│ ├── ...
└── results_20130124.token
Citation:
.. code-block::
@article{DBLP:journals/tacl/YoungLHH14,
author = {Peter Young and Alice Lai and Micah Hodosh and Julia Hockenmaier},
title = {From image descriptions to visual denotations: New similarity metrics
for semantic inference over event descriptions},
journal = {Trans. Assoc. Comput. Linguistics},
volume = {2},
pages = {67--78},
year = {2014},
url = {https://tacl2013.cs.columbia.edu/ojs/index.php/tacl/article/view/229},
timestamp = {Wed, 17 Feb 2021 21:55:25 +0100},
biburl = {https://dblp.org/rec/journals/tacl/YoungLHH14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
@check_flickr_dataset
def __init__(self, dataset_dir, annotation_file, num_samples=None, num_parallel_workers=None, shuffle=None,
decode=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.annotation_file = annotation_file
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.FlickrNode(self.dataset_dir, self.annotation_file, self.decode, self.sampler)
class SBDataset(GeneratorDataset):
"""
A source dataset for reading and parsing Semantic Boundaries Dataset.
The generated dataset has two columns: :py:obj:`[image, task]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`task` contains 20 images of the uint8 type if `task` is `Boundaries` otherwise
contains 1 image of the uint8 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str, optional): Acceptable tasks include `Boundaries` or `Segmentation` (default= `Boundaries`).
usage (str, optional): Acceptable usages include `train`, `val`, `train_noval` and `all` (default= `all`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
Raises:
RuntimeError: If dataset_dir is not valid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If task is not in [`Boundaries`, `Segmentation`].
ValueError: If usage is not in [`train`, `val`, `train_noval`, `all`].
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> sb_dataset_dir = "/path/to/sb_dataset_directory"
>>>
>>> # 1) Get all samples from Semantic Boundaries Dataset in sequence
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from Semantic Boundaries Dataset
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from Semantic Boundaries Dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_shards=2, shard_id=0)
>>>
>>> # In Semantic Boundaries Dataset, each dictionary has keys "image" and "task"
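>>>
>>> # 4) A hypothetical sketch: read the 'val' part for the Segmentation task
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, task='Segmentation', usage='val')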
About Semantic Boundaries Dataset:
The Semantic Boundaries Dataset consists of 11355 colour images. There are 8498 image names in train.txt,
2857 image names in val.txt and 5623 image names in train_noval.txt. The category cls/
contains the category-level Segmentation and Boundaries results, and the category inst/ contains the
instance-level Segmentation and Boundaries results.
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── benchmark_RELEASE
├── dataset
├── img
│ ├── 2008_000002.jpg
│ ├── 2008_000003.jpg
│ ├── ...
├── cls
│ ├── 2008_000002.mat
│ ├── 2008_000003.mat
│ ├── ...
├── inst
│ ├── 2008_000002.mat
│ ├── 2008_000003.mat
│ ├── ...
├── train.txt
└── val.txt
Citation:
.. code-block::
@InProceedings{BharathICCV2011,
author = "Bharath Hariharan and Pablo Arbelaez and Lubomir Bourdev and
Subhransu Maji and Jitendra Malik",
title = "Semantic Contours from Inverse Detectors",
booktitle = "International Conference on Computer Vision (ICCV)",
year = "2011",
}
"""
@check_sb_dataset
def __init__(self, dataset_dir, task='Boundaries', usage='all', num_samples=None, num_parallel_workers=1,
shuffle=None, decode=None, sampler=None, num_shards=None, shard_id=None):
dataset = _SBDataset(dataset_dir, task, usage, decode)
super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
class _SBDataset:
"""
Handles the data files with the .mat extension, returning one row as a tuple (image, task) each time.
"""
def __init__(self, dataset_dir, task, usage, decode):
self.column_list = ['image', 'task']
self.task = task
self.images_path = os.path.join(dataset_dir, 'img')
self.cls_path = os.path.join(dataset_dir, 'cls')
self._loadmat = loadmat
self.categories = 20
self.decode = replace_none(decode, False)
if usage == "all":
image_names = []
for item in ["train", "val"]:
usage_path = os.path.join(dataset_dir, item + '.txt')
if not os.path.exists(usage_path):
raise FileNotFoundError("SBDataset: {0} not found".format(usage_path))
with open(usage_path, 'r') as f:
image_names += [x.strip() for x in f.readlines()]
else:
usage_path = os.path.join(dataset_dir, usage + '.txt')
if not os.path.exists(usage_path):
raise FileNotFoundError("SBDataset: {0} not found".format(usage_path))
with open(usage_path, 'r') as f:
image_names = [x.strip() for x in f.readlines()]
self.images = [os.path.join(self.images_path, i + ".jpg") for i in image_names]
self.clss = [os.path.join(self.cls_path, i + ".mat") for i in image_names]
if len(self.images) != len(self.clss):
raise ValueError("SBDataset: images count not equal to cls count")
self._get_data = self._get_boundaries_data if self.task == "Boundaries" else self._get_segmentation_data
self._get_item = self._get_decode_item if self.decode else self._get_undecode_item
def _get_boundaries_data(self, mat_path):
mat_data = self._loadmat(mat_path)
return np.concatenate([np.expand_dims(mat_data['GTcls'][0][self.task][0][i][0].toarray(), axis=0)
for i in range(self.categories)], axis=0)
def _get_segmentation_data(self, mat_path):
mat_data = self._loadmat(mat_path)
return Image.fromarray(mat_data['GTcls'][0][self.task][0])
def _get_decode_item(self, idx):
return Image.open(self.images[idx]).convert('RGB'), self._get_data(self.clss[idx])
def _get_undecode_item(self, idx):
return np.fromfile(self.images[idx], dtype=np.uint8), self._get_data(self.clss[idx])
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
return self._get_item(idx)
class DeserializedDataset(Dataset):
def __init__(self, input_obj):
super().__init__()
self.input_obj = input_obj
def parse(self, children=None):
if isinstance(self.input_obj, dict):
json_str = json.dumps(self.input_obj)
return cde.Dataset.from_json_string(json_str)
return cde.Dataset.from_json_file(self.input_obj)
class CityscapesDataset(MappableDataset):
"""
A source dataset for reading and parsing Cityscapes dataset.
The generated dataset has two columns :py:obj:`[image, task]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`task` is of the uint8 type if task is not 'polygon'; otherwise column
:py:obj:`task` is a string tensor containing serialized JSON.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str): Acceptable usages include `train`, `test`, `val` or `all` if quality_mode is `fine`
otherwise `train`, `train_extra`, `val` or `all` (default= `train`).
quality_mode (str): Acceptable quality_modes include `fine` or `coarse` (default= `fine`).
task (str): Acceptable tasks include `instance`, `semantic`, `polygon` or `color` (default= `instance`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is invalid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If task is invalid.
ValueError: If quality_mode is invalid.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cityscapes_dataset_dir = "/path/to/cityscapes_dataset_directory"
>>>
>>> # 1) Get all samples from Cityscapes dataset in sequence
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, task="instance", quality_mode="fine",
... usage="train", shuffle=False, num_parallel_workers=1)
>>>
>>> # 2) Randomly select 350 samples from Cityscapes dataset
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_samples=350, shuffle=True,
... num_parallel_workers=1)
>>>
>>> # 3) Get samples from Cityscapes dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_shards=2, shard_id=0,
... num_parallel_workers=1)
>>>
>>> # In Cityscapes dataset, each dictionary has keys "image" and "task"
About Cityscapes dataset:
The Cityscapes dataset consists of 5000 colour images with high quality dense pixel annotations and
19998 colour images with coarser polygonal annotations in 50 cities. There are 30 classes in this
dataset and the polygonal annotations include dense semantic segmentation and instance segmentation
for vehicles and people.
You can unzip the dataset files into the following directory structure and read them with MindSpore's API.
Taking the quality_mode of `fine` as an example.
.. code-block::
.
└── Cityscapes
├── leftImg8bit
| ├── train
| | ├── aachen
| | | ├── aachen_000000_000019_leftImg8bit.png
| | | ├── aachen_000001_000019_leftImg8bit.png
| | | ├── ...
| | ├── bochum
| | | ├── ...
| | ├── ...
| ├── test
| | ├── ...
| ├── val
| | ├── ...
└── gtFine
├── train
| ├── aachen
| | ├── aachen_000000_000019_gtFine_color.png
| | ├── aachen_000000_000019_gtFine_instanceIds.png
| | ├── aachen_000000_000019_gtFine_labelIds.png
| | ├── aachen_000000_000019_gtFine_polygons.json
| | ├── aachen_000001_000019_gtFine_color.png
| | ├── aachen_000001_000019_gtFine_instanceIds.png
| | ├── aachen_000001_000019_gtFine_labelIds.png
| | ├── aachen_000001_000019_gtFine_polygons.json
| | ├── ...
| ├── bochum
| | ├── ...
| ├── ...
├── test
| ├── ...
└── val
├── ...
Citation:
.. code-block::
@inproceedings{Cordts2016Cityscapes,
title = {The Cityscapes Dataset for Semantic Urban Scene Understanding},
author = {Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler,
Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
booktitle = {Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2016}
}
"""
@check_cityscapes_dataset
def __init__(self, dataset_dir, usage="train", quality_mode="fine", task="instance", num_samples=None,
num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.task = task
self.quality_mode = quality_mode
self.usage = usage
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.CityscapesNode(self.dataset_dir, self.usage, self.quality_mode, self.task, self.decode, self.sampler)
class DIV2KDataset(MappableDataset):
"""
A source dataset for reading and parsing the DIV2K dataset.
The generated dataset has two columns :py:obj:`[hr_image, lr_image]`.
The tensor of column :py:obj:`hr_image` is of the uint8 type.
The tensor of column :py:obj:`lr_image` is of the uint8 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str): Acceptable usages include `train`, `valid` or `all` (default= `train`).
downgrade (str): Acceptable downgrades include `bicubic`, `unknown`, `mild`, `difficult` or
`wild` (default= `bicubic`).
scale (int): Acceptable scales include 2, 3, 4 or 8 (default=2).
When `downgrade` is `bicubic`, scale can be 2, 3, 4, 8.
When `downgrade` is `unknown`, scale can only be 2, 3, 4.
When `downgrade` is `mild`, `difficult` or `wild`, scale can only be 4.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is invalid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If usage is invalid.
ValueError: If downgrade is invalid.
ValueError: If scale is invalid.
ValueError: If scale is 8 and downgrade is not `bicubic`.
ValueError: If downgrade is one of `mild`, `difficult` or `wild` and scale is not 4.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> div2k_dataset_dir = "/path/to/div2k_dataset_directory"
>>>
>>> # 1) Get all samples from DIV2K dataset in sequence
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from DIV2K dataset
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from DIV2K dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... num_shards=2, shard_id=0)
>>>
>>> # In DIV2K dataset, each dictionary has keys "hr_image" and "lr_image"
About DIV2K dataset:
The DIV2K dataset consists of 1000 2K resolution images, among which 800 images are for training, 100 images
are for validation and 100 images are for testing. NTIRE 2017 and NTIRE 2018 include only training dataset
and validation dataset.
You can unzip the dataset files into the following directory structure and read them with MindSpore's API.
Take the training set as an example.
.. code-block::
.
└── DIV2K
├── DIV2K_train_HR
| ├── 0001.png
| ├── 0002.png
| ├── ...
├── DIV2K_train_LR_bicubic
| ├── X2
| | ├── 0001x2.png
| | ├── 0002x2.png
| | ├── ...
| ├── X3
| | ├── 0001x3.png
| | ├── 0002x3.png
| | ├── ...
| └── X4
| ├── 0001x4.png
| ├── 0002x4.png
| ├── ...
├── DIV2K_train_LR_unknown
| ├── X2
| | ├── 0001x2.png
| | ├── 0002x2.png
| | ├── ...
| ├── X3
| | ├── 0001x3.png
| | ├── 0002x3.png
| | ├── ...
| └── X4
| ├── 0001x4.png
| ├── 0002x4.png
| ├── ...
├── DIV2K_train_LR_mild
| ├── 0001x4m.png
| ├── 0002x4m.png
| ├── ...
├── DIV2K_train_LR_difficult
| ├── 0001x4d.png
| ├── 0002x4d.png
| ├── ...
├── DIV2K_train_LR_wild
| ├── 0001x4w.png
| ├── 0002x4w.png
| ├── ...
└── DIV2K_train_LR_x8
├── 0001x8.png
├── 0002x8.png
├── ...
Citation:
.. code-block::
@InProceedings{Agustsson_2017_CVPR_Workshops,
author = {Agustsson, Eirikur and Timofte, Radu},
title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
url = "http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf",
month = {July},
year = {2017}
}
"""
@check_div2k_dataset
def __init__(self, dataset_dir, usage="train", downgrade="bicubic", scale=2, num_samples=None,
num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = usage
self.scale = scale
self.downgrade = downgrade
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.DIV2KNode(self.dataset_dir, self.usage, self.downgrade, self.scale, self.decode, self.sampler)
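def _div2k_usage_sketch():
    """Hedged usage sketch, not part of the original module.

    A minimal example of how the DIV2KDataset defined above might be consumed.
    The dataset path is a placeholder, and `create_dict_iterator(output_numpy=True)`
    is assumed to behave as elsewhere in the MindSpore dataset API.
    """
    div2k = DIV2KDataset(dataset_dir="/path/to/div2k_dataset_directory",
                         usage="train", scale=2, downgrade="bicubic",
                         decode=True, shuffle=False, num_samples=4)
    for row in div2k.create_dict_iterator(output_numpy=True):
        # Each row is a dict with the two columns documented above.
        print(row["hr_image"].shape, row["lr_image"].shape)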
|
cifar10_to_mr.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Cifar10 convert tool for MindRecord.
"""
from importlib import import_module
import os
import numpy as np
from mindspore import log as logger
from .cifar10 import Cifar10
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
try:
cv2 = import_module("cv2")
except ModuleNotFoundError:
cv2 = None
__all__ = ['Cifar10ToMR']
class Cifar10ToMR:
"""
A class to transform from cifar10 to MindRecord.
Args:
source (str): the cifar10 directory to be transformed.
destination (str): the MindRecord file path to transform into.
Raises:
ValueError: If source or destination is invalid.
"""
def __init__(self, source, destination):
check_filename(source)
self.source = source
files = os.listdir(self.source)
train_data_flag = False
test_data_flag = False
for file in files:
if file.startswith("data_batch_"):
train_data_flag = True
if file.startswith("test_batch"):
test_data_flag = True
if not train_data_flag:
raise PathNotExistsError("data_batch_*")
if not test_data_flag:
raise PathNotExistsError("test_batch")
check_filename(destination)
self.destination = destination
self.writer = None
def run(self, fields=None):
"""
Executes transformation from cifar10 to MindRecord.
Args:
fields (list[str], optional): A list of index fields, e.g.["label"] (default=None).
Returns:
SUCCESS or FAILED, whether cifar10 is successfully transformed to MindRecord.
"""
if fields and not isinstance(fields, list):
raise ValueError("The parameter fields should be None or list")
cifar10_data = Cifar10(self.source, False)
cifar10_data.load_data()
images = cifar10_data.images
logger.info("train images: {}".format(images.shape))
labels = cifar10_data.labels
logger.info("train images label: {}".format(labels.shape))
test_images = cifar10_data.Test.images
logger.info("test images: {}".format(test_images.shape))
test_labels = cifar10_data.Test.labels
logger.info("test images label: {}".format(test_labels.shape))
data_list = _construct_raw_data(images, labels)
test_data_list = _construct_raw_data(test_images, test_labels)
if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
return FAILED
if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
return FAILED
return SUCCESS
def transform(self, fields=None):
t = ExceptionThread(target=self.run, kwargs={'fields': fields})
t.daemon = True
t.start()
t.join()
if t.exitcode != 0:
raise t.exception
return t.res
def _construct_raw_data(images, labels):
"""
Construct raw data from cifar10 data.
Args:
images (list): image list from cifar10.
labels (list): label list from cifar10.
Returns:
list[dict], raw data rows constructed from the cifar10 images and labels.
"""
if not cv2:
raise ModuleNotFoundError("opencv-python module not found, please install it with pip.")
raw_data = []
for i, img in enumerate(images):
label = int(labels[i][0])
_, img = cv2.imencode(".jpeg", img[..., [2, 1, 0]])
row_data = {"id": int(i),
"data": img.tobytes(),
"label": int(label)}
raw_data.append(row_data)
return raw_data
def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
"""
Generate MindRecord file from raw data.
Args:
file_name (str): File name of MindRecord File.
fields (list[str]): Fields to be set as index; they may not belong to the blob fields
and their type may not be 'array' or 'bytes'.
raw_data (list[dict]): List of raw data rows.
schema_desc (str): String of schema description.
Returns:
SUCCESS/FAILED, whether successfully written into MindRecord.
"""
schema = {"id": {"type": "int64"}, "label": {"type": "int64"},
"data": {"type": "bytes"}}
logger.info("transformed MindRecord schema is: {}".format(schema))
writer = FileWriter(file_name, 1)
writer.add_schema(schema, schema_desc)
if fields and isinstance(fields, list):
writer.add_index(fields)
writer.write_raw_data(raw_data)
return writer.commit()
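# Hedged usage sketch, not part of the original module: how Cifar10ToMR defined
# above might be invoked from a script. Both paths are placeholders; transform()
# runs the conversion in a helper thread and re-raises any exception it hit.
if __name__ == "__main__":
    converter = Cifar10ToMR("/path/to/cifar-10-batches-py", "/path/to/cifar10.mindrecord")
    # Index the "label" field so the resulting MindRecord can be filtered by label.
    converter.transform(fields=["label"])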
|
contiguity_apply_async.py
|
#Contiguity using apply_async
import pysal as ps
from collections import defaultdict
import multiprocessing as mp
import time
import sys
import ctypes
import numpy as np
from numpy.random import randint
def check_contiguity(checks,lock,weight_type='ROOK'):
cid = mp.current_process()._name
geoms = np.frombuffer(sgeoms)
geoms.shape = (2,geoms.shape[0] / 2)
offsets = np.frombuffer(soffsets) #This is float, but should be int...
contmatrix = np.frombuffer(scontmatrix)
contmatrix.shape = (len(offsets), len(offsets))
if weight_type == 'ROOK':
for polys in checks:
potential_neigh = polys.tolist()
vertices = {}
for poly in polys:
vstart = 0
vend = offsets[poly]
if poly - 1 > 0:
vstart = offsets[int(poly) - 1]
vertices[poly] = geoms[:,vstart:vend]
for k, v in vertices.iteritems():
potential_neigh.remove(k)
root_geom = v
for neigh in potential_neigh:
test_geom = vertices[neigh]
#If the geoms share a common vertex, we need to test for a common edge.
xintersects = np.intersect1d(root_geom[0], test_geom[0])
if len(xintersects) > 1:
yintersects = np.intersect1d(root_geom[1], test_geom[1])
if len(yintersects) > 1:
#We have two shared points - are they adjacent in the poly geom, i.e. an edge?
x1root = np.where(root_geom[0] == xintersects[0])[0]
x2root = np.where(root_geom[0] == xintersects[1])[0]
if np.absolute(x1root - x2root).any() == 1:
x1test = np.where(test_geom[0] == xintersects[0])[0]
x2test = np.where(test_geom[0] == xintersects[1])[0]
if np.absolute(x1test - x2test).any() == 1:
with lock:
contmatrix[k, neigh] += 1
contmatrix[neigh, k] += 1
def global_pointers(_cgeoms, _coffsets, _contmatrix):
global sgeoms
global soffsets
global scontmatrix
sgeoms = _cgeoms
soffsets = _coffsets
scontmatrix = _contmatrix
if __name__ == "__main__":
if len(sys.argv) > 1:
cores = int(sys.argv[1])
else:
cores = mp.cpu_count()
#print "This version uses apply_async with a callback function and {0} cores.".format(cores)
#fnames = ['1024_lattice.shp', '10000_lattice.shp', '50176_lattice.shp', '100489_lattice.shp', '1000_poly.shp', '10000_poly.shp', '50000_poly.shp', '100000_poly.shp']
fnames = ['2500_poly.shp']
for fname in fnames:
ta = time.time() #Global time keeper
t1 = time.time()
#Phase 1: Bin the shapefile
shpFileObject = ps.open(fname)
t2 = time.time()
print "Reading the shapefile took {} seconds".format(t2-t1)
t1 = time.time()
if shpFileObject.type != ps.cg.Polygon:
break
t2 = time.time()
print "Checking the geometry took {} seconds".format(t2-t1)
t1 = time.time()
shapebox = shpFileObject.bbox # bounding box
numPoly = len(shpFileObject)
t2 = time.time()
print "Getting the BBox and length took {} seconds".format(t2-t1)
t1 = time.time()
t3 = time.time()
ranseq = sorted([randint(0,numPoly) for r in xrange(5)])
geomx = []
geomy = []
bboxes = np.empty((numPoly, 4))
pieces = 0
total_perim = 0
lens = np.empty(numPoly)
t4 = time.time()
for g in xrange(numPoly):
shpobj = shpFileObject.get(g)
x, y = zip(*shpobj.vertices)
geomx += x
geomy += y
lens[g] = shpobj.len
bboxes[g][:] = shpobj.bounding_box[:] #Adds ~0.3 seconds for 5625 polygons - super inefficient!
if g in ranseq:
pieces += lens[g] - 1
total_perim += shpobj.perimeter
cellsize = total_perim / pieces * 1.
cellsize *= 2 #This needs to be tested: is a cell size of l better or l*c?
geoms = np.empty((2, len(geomx)))
geoms[0] = geomx
geoms[1] = geomy
del geomx, geomy
t2 = time.time()
print "***THIS IS ALL READ TIME***"
print "Flattening vertices and cellsize computation required {} seconds".format(t2 - t1)
print " Within this {} seconds were used for allocation".format(t4-t3)
print "***DONE READING***"
print "Processing with a cell size of {} units".format(cellsize)
t1 = time.time()
xdimension = abs(int((shapebox[2] - shapebox[0]) / cellsize))
ydimension = abs(int((shapebox[3] - shapebox[1]) / cellsize))
#Partition the space into a regular grid
xmesh = np.linspace(shapebox[0], shapebox[2], xdimension)
ymesh = np.linspace(shapebox[1], shapebox[3], ydimension)
xv, yv = np.meshgrid(xmesh,ymesh)
memship = np.empty((numPoly, 5), dtype=np.int)
#Intersect the BBoxes with the meshgrid
memship[:,2] = np.searchsorted(yv[:,0], bboxes[:,1], side='left')
memship[:,3] = np.searchsorted(yv[:,0], bboxes[:,3], side='left')
memship[:,0] = np.searchsorted(xv[0], bboxes[:,0], side='left')
memship[:,1] = np.searchsorted(xv[0], bboxes[:,2], side='left')
#Fix floating point inaccuracies, i.e. all the 0s and all the max + 1 values
ystart = memship[:,2]
ystart[ystart == 0] = 1
xstart = memship[:,0]
xstart[xstart == 0] = 1
ystop = memship[:,3]
ystop[ystop == len(yv[:,0]) + 1] = len(yv[:,0])
xstop = memship[:,1]
xstop[xstop == len(xv[0]) + 1] = len(xv[0])
#Add the keys
memship[:,4] = indices = np.arange(len(bboxes))
#Lexicographical sort on xstart, ystart, xend, yend
ind = np.lexsort((memship[:,0], memship[:,2], memship[:,1], memship[:,3]))
sortmem = memship[ind]
t2 = time.time()
print "Getting buckets and generating data structure took {} seconds.".format(t2-t1)
t1 = time.time()
potential_neighbors = {}
#Can this be vectorized or use itertools?
for i in xrange(1, len(xv[0])):
stepback = {} #A list of x and y crossers that we need to decrement x for
crosseridx = np.where((sortmem[:,0]==i) & (sortmem[:,1]!=sortmem[:,0]))
crosseridy = np.where((sortmem[:,0]==i)\
& (sortmem[:,2]!=sortmem[:,3])\
& (sortmem[:,1]!=sortmem[:,0]))
yrollback = sortmem[crosseridy, 2]
for j in xrange(1, len(yv[:,0])):
#Step over all y cells in the x column
yidx = np.logical_and(sortmem[:,0] == i, sortmem[:,2] == j)
if len(sortmem[yidx, -1]) > 0:
potential_neighbors[(i,j)] = sortmem[yidx, -1]
#Same idea as below, but with all j in this i - using bitwise operators
# should be safe as arrays are all boolean checks.
idx = np.where((sortmem[:,2]==j) & (sortmem[:,2]!=sortmem[:,3]) & (sortmem[:,0]==i))
sortmem[idx,2] = (j + 1)
#We know that all the values are sorted, so if start != end, increment
# start until it start == end. Then the poly is added to all
# row / column pairs between start and end.
sortmem[crosseridx, 0] = (i + 1)
#Rollback the y crossers for the new x.
sortmem[crosseridy,2] = yrollback
t2 = time.time()
print "Extracting vectors to polygon membership lists too {} seconds".format(t2-t1)
t1 = time.time()
#Can I get a vertex count from a shapefile header?
# If so no need for lists to arrays, just allocate and pack.
cgeoms = mp.RawArray(ctypes.c_double, geoms.size)
npgeoms = np.frombuffer(cgeoms)
npgeoms.shape = (2, geoms.shape[1])
npgeoms[:] = geoms
coffsets = mp.RawArray(ctypes.c_int, lens.size * 2)
npoffsets = np.frombuffer(coffsets)
npoffsets[:] = np.cumsum(lens)
contmatrix = mp.RawArray(ctypes.c_int, (lens.size * lens.size * 2))
npcontmatrix = np.frombuffer(contmatrix)
npcontmatrix.shape = (lens.size, lens.size)
npcontmatrix[:] = 0
global_pointers(cgeoms, coffsets, contmatrix)
t2 = time.time()
print "Creating ctype shared memory vertices took {} seconds".format(t2-t1)
'''
t1 = time.time()
cores = mp.cpu_count()
pool = mp.Pool(cores)
t2 = time.time()
print "Initializing the pool of workers took {} seconds".format(t2 - t1)
'''
t1 = time.time()
#We don't care what 'cell' polys are in, only that they
# might be neighbors.
neighbor_checks = [v for v in potential_neighbors.itervalues()]
starts = range(0,len(neighbor_checks), len(neighbor_checks) / cores)
stops = starts[1:]
#Close the final chunk so it runs to the end of neighbor_checks.
stops.append(len(neighbor_checks))
offsets = [ range(z[0],z[1]) for z in zip(starts, stops)]
t2 = time.time()
print "Computing decomposition took {} seconds".format(t2-t1)
t1 = time.time()
jobs = []
lock = mp.Lock()
for offset in offsets:
checks = [neighbor_checks[j] for j in offset]
job = mp.Process(target=check_contiguity, args=(checks,lock, 'ROOK'))
jobs.append(job)
for job in jobs:
job.start()
for job in jobs:
job.join()
t2 = time.time()
print "Multicore contiguity check took {} seconds".format(t2-t1)
t1 = time.time()
w = {}
nonzero = np.transpose(np.nonzero(npcontmatrix))
for i in range(numPoly):
neigh = nonzero[nonzero[:,0] == i]
w[i] = neigh[:,1].tolist()
t2 = time.time()
print "Generating a W from a sparse matrix took {} seconds".format(t2-t1)
tb = time.time()
print "Total processing time was {} seconds".format(tb-ta)
|
test_caching.py
|
import datetime
from itertools import count
import os
import threading
import time
import urllib.parse
import pytest
import cherrypy
from cherrypy.lib import httputil
from cherrypy.test import helper
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
gif_bytes = (
b'GIF89a\x01\x00\x01\x00\x82\x00\x01\x99"\x1e\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x02\x03\x02\x08\t\x00;'
)
class CacheTest(helper.CPWebCase):
@staticmethod
def setup_server():
@cherrypy.config(**{'tools.caching.on': True})
class Root:
def __init__(self):
self.counter = 0
self.control_counter = 0
self.longlock = threading.Lock()
@cherrypy.expose
def index(self):
self.counter += 1
msg = 'visit #%s' % self.counter
return msg
@cherrypy.expose
def control(self):
self.control_counter += 1
return 'visit #%s' % self.control_counter
@cherrypy.expose
def a_gif(self):
cherrypy.response.headers[
'Last-Modified'] = httputil.HTTPDate()
return gif_bytes
@cherrypy.expose
def long_process(self, seconds='1'):
try:
self.longlock.acquire()
time.sleep(float(seconds))
finally:
self.longlock.release()
return 'success!'
@cherrypy.expose
def clear_cache(self, path):
cherrypy._cache.store[cherrypy.request.base + path].clear()
@cherrypy.config(**{
'tools.caching.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [
('Vary', 'Our-Varying-Header')
],
})
class VaryHeaderCachingServer(object):
def __init__(self):
self.counter = count(1)
@cherrypy.expose
def index(self):
return 'visit #%s' % next(self.counter)
@cherrypy.config(**{
'tools.expires.on': True,
'tools.expires.secs': 60,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir,
})
class UnCached(object):
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 0})
def force(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
self._cp_config['tools.expires.force'] = True
self._cp_config['tools.expires.secs'] = 0
return 'being forceful'
@cherrypy.expose
def dynamic(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
cherrypy.response.headers['Cache-Control'] = 'private'
return 'D-d-d-dynamic!'
@cherrypy.expose
def cacheable(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
return "Hi, I'm cacheable."
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 86400})
def specific(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return 'I am being specific'
class Foo(object):
pass
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': Foo()})
def wrongtype(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return 'Woops'
@cherrypy.config(**{
'tools.gzip.mime_types': ['text/*', 'image/*'],
'tools.caching.on': True,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir
})
class GzipStaticCache(object):
pass
cherrypy.tree.mount(Root())
cherrypy.tree.mount(UnCached(), '/expires')
cherrypy.tree.mount(VaryHeaderCachingServer(), '/varying_headers')
cherrypy.tree.mount(GzipStaticCache(), '/gzip_static_cache')
cherrypy.config.update({'tools.gzip.on': True})
def testCaching(self):
elapsed = 0.0
for trial in range(10):
self.getPage('/')
# The response should be the same every time,
# except for the Age response header.
self.assertBody('visit #1')
if trial != 0:
age = int(self.assertHeader('Age'))
self.assert_(age >= elapsed)
elapsed = age
# POST, PUT, DELETE should not be cached.
self.getPage('/', method='POST')
self.assertBody('visit #2')
# Because gzip is turned on, the Vary header should always Vary for
# content-encoding
self.assertHeader('Vary', 'Accept-Encoding')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage('/', method='GET')
self.assertBody('visit #3')
# ...but this request should get the cached copy.
self.getPage('/', method='GET')
self.assertBody('visit #3')
self.getPage('/', method='DELETE')
self.assertBody('visit #4')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage('/', method='GET', headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertHeader('Vary')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), b'visit #5')
# Now check that a second request gets the gzip header and gzipped body
# This also tests a bug in 3.0 to 3.0.2 whereby the cached, gzipped
# response body was being gzipped a second time.
self.getPage('/', method='GET', headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), b'visit #5')
# Now check that a third request that doesn't accept gzip
# skips the cache (because the 'Vary' header denies it).
self.getPage('/', method='GET')
self.assertNoHeader('Content-Encoding')
self.assertBody('visit #6')
def testVaryHeader(self):
self.getPage('/varying_headers/')
self.assertStatus('200 OK')
self.assertHeaderItemValue('Vary', 'Our-Varying-Header')
self.assertBody('visit #1')
# Now check that different 'Vary'-fields don't evict each other.
# This test creates 2 requests with different 'Our-Varying-Header'
# and then tests if the first one still exists.
self.getPage('/varying_headers/',
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus('200 OK')
self.assertBody('visit #2')
self.getPage('/varying_headers/',
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus('200 OK')
self.assertBody('visit #2')
self.getPage('/varying_headers/')
self.assertStatus('200 OK')
self.assertBody('visit #1')
def testExpiresTool(self):
# test setting an expires header
self.getPage('/expires/specific')
self.assertStatus('200 OK')
self.assertHeader('Expires')
# test exceptions for bad time values
self.getPage('/expires/wrongtype')
self.assertStatus(500)
self.assertInBody('TypeError')
# static content should not have "cache prevention" headers
self.getPage('/expires/index.html')
self.assertStatus('200 OK')
self.assertNoHeader('Pragma')
self.assertNoHeader('Cache-Control')
self.assertHeader('Expires')
# dynamic content that sets indicators should not have
# "cache prevention" headers
self.getPage('/expires/cacheable')
self.assertStatus('200 OK')
self.assertNoHeader('Pragma')
self.assertNoHeader('Cache-Control')
self.assertHeader('Expires')
self.getPage('/expires/dynamic')
self.assertBody('D-d-d-dynamic!')
# the Cache-Control header should be untouched
self.assertHeader('Cache-Control', 'private')
self.assertHeader('Expires')
# configure the tool to ignore indicators and replace existing headers
self.getPage('/expires/force')
self.assertStatus('200 OK')
# This also gives us a chance to test 0 expiry with no other headers
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
# static content should now have "cache prevention" headers
self.getPage('/expires/index.html')
self.assertStatus('200 OK')
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
# the cacheable handler should now have "cache prevention" headers
self.getPage('/expires/cacheable')
self.assertStatus('200 OK')
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
self.getPage('/expires/dynamic')
self.assertBody('D-d-d-dynamic!')
# dynamic sets Cache-Control to private but it should be
# overwritten here ...
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
def _assert_resp_len_and_enc_for_gzip(self, uri):
"""
Test that after querying gzipped content it remains valid in
the cache and is available non-gzipped as well.
"""
ACCEPT_GZIP_HEADERS = [('Accept-Encoding', 'gzip')]
content_len = None
for _ in range(3):
self.getPage(uri, method='GET', headers=ACCEPT_GZIP_HEADERS)
if content_len is not None:
# all requests should get the same length
self.assertHeader('Content-Length', content_len)
self.assertHeader('Content-Encoding', 'gzip')
content_len = dict(self.headers)['Content-Length']
# check that we can still get non-gzipped version
self.getPage(uri, method='GET')
self.assertNoHeader('Content-Encoding')
# non-gzipped version should have a different content length
self.assertNoHeaderItemValue('Content-Length', content_len)
def testGzipStaticCache(self):
"""Test that cache and gzip tools play well together when both enabled.
Ref GitHub issue #1190.
"""
GZIP_STATIC_CACHE_TMPL = '/gzip_static_cache/{}'
resource_files = ('index.html', 'dirback.jpg')
for f in resource_files:
uri = GZIP_STATIC_CACHE_TMPL.format(f)
self._assert_resp_len_and_enc_for_gzip(uri)
def testLastModified(self):
self.getPage('/a.gif')
self.assertStatus(200)
self.assertBody(gif_bytes)
lm1 = self.assertHeader('Last-Modified')
# this request should get the cached copy.
self.getPage('/a.gif')
self.assertStatus(200)
self.assertBody(gif_bytes)
self.assertHeader('Age')
lm2 = self.assertHeader('Last-Modified')
self.assertEqual(lm1, lm2)
# this request should match the cached copy, but raise 304.
self.getPage('/a.gif', [('If-Modified-Since', lm1)])
self.assertStatus(304)
self.assertNoHeader('Last-Modified')
if not getattr(cherrypy.server, 'using_apache', False):
self.assertHeader('Age')
@pytest.mark.xfail(reason='#1536')
def test_antistampede(self):
SECONDS = 4
slow_url = '/long_process?seconds={SECONDS}'.format(**locals())
# We MUST make an initial synchronous request in order to create the
# AntiStampedeCache object, and populate its selecting_headers,
# before the actual stampede.
self.getPage(slow_url)
self.assertBody('success!')
path = urllib.parse.quote(slow_url, safe='')
self.getPage('/clear_cache?path=' + path)
self.assertStatus(200)
start = datetime.datetime.now()
def run():
self.getPage(slow_url)
# The response should be the same every time
self.assertBody('success!')
ts = [threading.Thread(target=run) for i in range(100)]
for t in ts:
t.start()
for t in ts:
t.join()
finish = datetime.datetime.now()
# Allow for overhead, two seconds for slow hosts
allowance = SECONDS + 2
self.assertEqualDates(start, finish, seconds=allowance)
def test_cache_control(self):
self.getPage('/control')
self.assertBody('visit #1')
self.getPage('/control')
self.assertBody('visit #1')
self.getPage('/control', headers=[('Cache-Control', 'no-cache')])
self.assertBody('visit #2')
self.getPage('/control')
self.assertBody('visit #2')
self.getPage('/control', headers=[('Pragma', 'no-cache')])
self.assertBody('visit #3')
self.getPage('/control')
self.assertBody('visit #3')
time.sleep(1)
self.getPage('/control', headers=[('Cache-Control', 'max-age=0')])
self.assertBody('visit #4')
self.getPage('/control')
self.assertBody('visit #4')
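# Hedged sketch, not part of the original test module: a minimal standalone CherryPy
# app wired the same way as the handlers above, so the caching/expires behaviour
# exercised by these tests can be poked at by hand. Names and the commented-out
# quickstart call are illustrative only.
def _demo_cached_app():
    @cherrypy.config(**{'tools.caching.on': True,
                        'tools.expires.on': True,
                        'tools.expires.secs': 60})
    class Demo:
        def __init__(self):
            self.hits = 0

        @cherrypy.expose
        def index(self):
            # With caching on, repeated GETs keep returning the first body
            # (plus an Age header) until the cache entry expires or is invalidated.
            self.hits += 1
            return 'hit #%s' % self.hits

    return Demo()
# cherrypy.quickstart(_demo_cached_app())  # uncomment to serve the sketch locally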
|
test_worker_infrastructure.py
|
import threading
import logging
import time
import pytest
from dexbot.worker import WorkerInfrastructure
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
@pytest.fixture(scope='module')
def account(prepare_account):
account = prepare_account({'MYBASE': 10000, 'MYQUOTE': 2000})
return account
@pytest.fixture(scope='module')
def config(bitshares, account):
config = {
'node': '{}'.format(bitshares.rpc.url),
'workers': {
'echo': {'account': '{}'.format(account), 'market': 'MYQUOTE/MYBASE', 'module': 'dexbot.strategies.echo'}
},
}
return config
def test_worker_infrastructure(bitshares, config):
""" Test whether dexbot core is able to work
"""
worker_infrastructure = WorkerInfrastructure(config=config, bitshares_instance=bitshares)
def wait_then_stop():
time.sleep(1)
worker_infrastructure.do_next_tick(worker_infrastructure.stop(pause=True))
stopper = threading.Thread(target=wait_then_stop)
stopper.start()
worker_infrastructure.run()
stopper.join()
|
test.py
|
import json
import os.path as p
import random
import socket
import subprocess
import threading
import time
import avro.schema
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
from confluent_kafka import admin
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer
from kafka.admin import NewTopic
"""
protoc --version
libprotoc 3.0.0
# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
from . import kafka_pb2
from . import social_pb2
# TODO: add a test for run-time offset updates in CH when the offset is manually updated on the Kafka side.
# TODO: add a test that SELECT with LIMIT works.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml', 'configs/log_conf.xml'],
with_kafka=True,
with_zookeeper=True,
macros={"kafka_broker":"kafka1",
"kafka_topic_old":"old",
"kafka_group_name_old":"old",
"kafka_topic_new":"new",
"kafka_group_name_new":"new",
"kafka_client_id":"instance",
"kafka_format_json_each_row":"JSONEachRow"},
clickhouse_path_dir='clickhouse_path')
kafka_id = ''
# Helpers
def check_kafka_is_available():
p = subprocess.Popen(('docker',
'exec',
'-i',
kafka_id,
'/usr/bin/kafka-broker-api-versions',
'--bootstrap-server',
'INSIDE://localhost:9092'),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def wait_kafka_is_available(max_retries=50):
retries = 0
while True:
if check_kafka_is_available():
break
else:
retries += 1
if retries > max_retries:
raise "Kafka is not available"
print("Waiting for Kafka to start up")
time.sleep(1)
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
def kafka_produce(topic, messages, timestamp=None, retries=2):
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer, retries=retries, max_in_flight_requests_per_connection=1)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(max_retries=50):
kafka_produce('test_heartbeat_topic', ['test'], retries=max_retries)
def kafka_consume(topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:9092", auto_offset_reset="earliest")
consumer.subscribe(topics=[topic])
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
yield message.value.decode()
consumer.unsubscribe()
consumer.close()
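# Hedged sketch, not part of the original test module: a produce/consume round trip
# with the two helpers above, handy for poking at a locally running broker. The
# topic name is a placeholder; the broker address is the one hard-coded above.
def _demo_produce_consume(topic='demo_topic'):
    kafka_produce(topic, [json.dumps({'key': i, 'value': i}) for i in range(3)])
    return list(kafka_consume(topic))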
def kafka_produce_protobuf_messages(topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
print(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(topic, start_index, num_messages):
data = ''
producer = KafkaProducer(bootstrap_servers="localhost:9092")
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
print("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
print(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> str
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
def describe_consumer_group(name):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
consumer_groups = admin_client.describe_consumer_groups([name])
res = []
for member in consumer_groups[0].members:
member_info = {}
member_info['member_id'] = member.member_id
member_info['client_id'] = member.client_id
member_info['client_host'] = member.client_host
member_topics_assignment = []
for (topic, partitions) in member.member_assignment.assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
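# Hedged sketch, not part of the original test module: how describe_consumer_group
# above is typically used further down - fetch the members of a group and inspect
# the client ids registered by ClickHouse. The group name is a placeholder.
def _demo_describe_group(name='old'):
    return [member['client_id'] for member in describe_consumer_group(name)]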
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
wait_kafka_is_available() # ensure kafka is alive
kafka_producer_send_heartbeat_msg() # ensure python kafka client is ok
# print("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
# Insert couple of malformed messages.
kafka_produce('new', ['}{very_broken_message,'])
kafka_produce('new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('new')
assert members[0]['client_id'] == 'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_json_as_string(kafka_cluster):
kafka_produce('kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows")
@pytest.mark.timeout(120)
def test_kafka_formats(kafka_cluster):
# data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
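# Hand-decoded sketch of the single-row 'Native' sample above (just a reading aid; column names per the test schema):
#   \x05 \x01                      -- 5 columns, 1 row (varints)
#   \x02 "id"      \x05 "Int64"    \x00*8            -- little-endian Int64 value 0
#   \x07 "blockNo" \x06 "UInt16"   \x00\x00          -- UInt16 value 0
#   \x04 "val1"    \x06 "String"   \x02 "AM"         -- length-prefixed string "AM"
#   \x04 "val2"    \x07 "Float32"  \x00\x00\x00\x3f  -- little-endian Float32 0.5
#   \x04 "val3"    \x05 "UInt8"    \x01              -- UInt8 value 1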
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
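# Hand-decoded sketch of the single-row 'MsgPack' sample above (one MessagePack value per column, row after row):
#   \x00                    -- positive fixint 0         (id)
#   \x00                    -- positive fixint 0         (blockNo)
#   \xa2 \x41\x4d           -- fixstr, 2 bytes, "AM"     (val1)
#   \xca \x3f\x00\x00\x00   -- float32 (big-endian) 0.5  (val2)
#   \x01                    -- positive fixint 1         (val3)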
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
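# Hand-decoded sketch of the single-row 'RowBinary' sample above (fixed-width values, strings prefixed with a varint length):
#   \x00*8            -- id      Int64 (little-endian) = 0
#   \x00\x00          -- blockNo UInt16 = 0
#   \x02 \x41\x4d     -- val1    String, length 2, "AM"
#   \x00\x00\x00\x3f  -- val2    Float32 (little-endian) = 0.5
#   \x01              -- val3    UInt8 = 1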
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
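# The 'RowBinaryWithNamesAndTypes' samples above are the same row payload as plain RowBinary,
# preceded by a header (a reading sketch): varint column count \x05, then the 5 length-prefixed
# column names ("id", "blockNo", "val1", "val2", "val3"), then the 5 length-prefixed type names
# ("Int64", "UInt16", "String", "Float32", "UInt8"). The header appears once at the start of each message.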
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
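# Wire-format sketch of the single-row 'Protobuf' sample above (each row is a varint-length-prefixed message;
# the field-number -> column mapping is assumed from the 'test:TestMessage' schema, fields 1..5 in column order,
# and field 1 = id is confirmed by the \x08 tags in the 16-row sample):
#   \x0b                    -- message length = 11 bytes
#   \x1a \x02 \x41\x4d      -- field 3 (val1), length-delimited, "AM"
#   \x25 \x00\x00\x00\x3f   -- field 4 (val2), fixed32, 0.5
#   \x28 \x01               -- field 5 (val3), varint, 1
# id and blockNo (fields 1 and 2) are zero here and simply not encoded.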
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
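# Note: ORC (like the disabled Parquet/Arrow entries below) is a whole-file columnar format, so each Kafka
# message here carries a complete ORC file; that is why every sample above starts with the "ORC" magic
# (\x4f\x52\x43) and ends with the postscript, "...\x4f\x52\x43" plus its length byte \x18.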
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
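# Framing sketch for the 'CapnProto' samples above (standard Cap'n Proto stream framing, not decoded field by field):
#   \x00\x00\x00\x00  -- segment count minus one = 0, i.e. one segment
#   \x05\x00\x00\x00  -- segment size = 5 words (40 bytes)
# followed by the 40-byte segment: a root struct pointer and the encoded row
# ('test:TestRecordStruct' from kafka_schema above).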
# 'Parquet' : {
# not working at all with Kafka: DB::Exception: IOError: Invalid Parquet file size is 0 bytes
# /contrib/libcxx/include/exception:129: std::exception::capture() @ 0x15c33fe8 in /usr/bin/clickhouse
# /contrib/libcxx/include/exception:109: std::exception::exception() @ 0x15c33fb5 in /usr/bin/clickhouse
# /contrib/poco/Foundation/src/Exception.cpp:27: Poco::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x21877833 in /usr/bin/clickhouse
# /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:70: DB::ParquetBlockInputFormat::prepareReader() @ 0x1df2b0c2 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:36: DB::ParquetBlockInputFormat::ParquetBlockInputFormat(DB::ReadBuffer&, DB::Block) @ 0x1df2af8b in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ParquetBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, 0ul, 1ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>, std::__1::__tuple_indices<0ul, 1ul>) @ 0x1df2dc88 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ParquetBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ParquetBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>) @ 0x1df2d9c8 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ParquetBlockInputFormat, std::__1::allocator<DB::ParquetBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&>(std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ReadBuffer&, DB::Block const&) @ 0x1df2d687 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ParquetBlockInputFormat>::value), std::__1::shared_ptr<DB::ParquetBlockInputFormat> >::type std::__1::make_shared<DB::ParquetBlockInputFormat, DB::ReadBuffer&, DB::Block const&>(DB::ReadBuffer&, DB::Block const&) @ 0x1df2d455 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:95: DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1df2cec7 in /usr/bin/clickhouse
# /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ce6a in /usr/bin/clickhouse
# /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2cd7d in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ccda in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2bdec in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:63: DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*) @ 0x1c9e9fc7 in /usr/bin/clickhouse
# /src/Storages/Kafka/StorageKafka.cpp:565: DB::StorageKafka::streamToViews() @ 0x1d8cc3fa in /usr/bin/clickhouse
# # 'data_sample' : [
# # b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\
# # xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\
# # x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
# # b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\
# # xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # ''
# # ],
# },
# 'Avro' : {
# 'data_sample' : [
# b'\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\x8d\x1f\xf2\x17\x71\xa4\x2e\xe4\xc9\x0a\x23\x67\x12\xaa\xc6\xc0\x02\x14\x00\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x8d\x1f\xf2\x17\x71\xa4\x2e\xe4\xc9\x0a\x23\x67\x12\xaa\xc6\xc0',
# b'\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\xeb\x9d\x51\x82\xf2\x11\x3d\x0b\xc5\x92\x97\xb2\x07\x6d\x72\x5a\x1e\xac\x02\x02\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x04\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x06\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x08\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0a\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0c\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0e\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x10\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x12\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x14\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x16\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x18\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1a\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1c\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1e\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\xeb\x9d\x51\x82\xf2\x11\x3d\x0b\xc5\x92\x97\xb2\x07\x6d\x72\x5a',
# b'\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\x73\x65\x4f\x7c\xd9\x33\xe1\x18\xdd\x30\xe8\x22\x2a\x58\x20\x6f\x02\x14\x00\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x73\x65\x4f\x7c\xd9\x33\xe1\x18\xdd\x30\xe8\x22\x2a\x58\x20\x6f',
# ],
# },
'AvroConfluent': {
'data_sample': [
avro_confluent_message(cluster.schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(cluster.schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(cluster.schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
cluster.schema_registry_host,
cluster.schema_registry_port
),
'supports_empty_value': True,
}
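# The AvroConfluent samples above are built at runtime by the avro_confluent_message() helper rather than
# hard-coded bytes; per the Confluent wire format each message is presumably a 0x00 magic byte, a 4-byte
# schema id registered in the schema registry, and then the Avro-binary-encoded row, which is why this
# format also needs format_avro_schema_registry_url in its extra_settings.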
# 'Arrow' : {
# # Not working at all: DB::Exception: Error while opening a table: Invalid: File is too small: 0, Stack trace (when copying this message, always include the lines below):
# # /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:107: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de010df in /usr/bin/clickhouse
# 'data_sample' : [
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
# \x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00
# \x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# ],
# },
# 'ArrowStream' : {
# # Not working at all:
# # Error while opening a table: Invalid: Tried reading schema message, was null or length 0, Stack trace (when copying this message, always include the lines below):
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:117: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de0273f in /usr/bin/clickhouse
# # /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de026da in /usr/bin/clickhouse
# # /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de025ed in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0254a in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0165c in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# # /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# # /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# # /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# # /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# 'data_sample' : [
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00
\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# ],
# },
}
for format_name, format_opts in list(all_formats.items()):
print(('Set up {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend an empty value when the format supports it
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
for format_name, format_opts in list(all_formats.items()):
print(('Checking {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
# shift offsets by 1 if the format supports an empty value (see the sketch after this loop)
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
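# A minimal sketch (not wired into the loop above) of the offset bookkeeping used for
# formats that support an empty value: an extra empty message is prepended to the topic,
# so every expected Kafka offset shifts forward by one. The helper name is illustrative.
def _expected_offsets(supports_empty_value):
    # offsets of the three data_sample messages produced per topic
    base = [0, 1, 2]
    # the prepended empty message occupies offset 0 and pushes the rest forward
    return [offset + 1 for offset in base] if supports_empty_value else base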
@pytest.mark.timeout(180)
def test_kafka_issue11308(kafka_cluster):
# Check that the materialized view respects Kafka-level SETTINGS (here input_format_import_nested_json)
kafka_produce('issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter now works, as part of the generally available format settings.
kafka_produce('issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="consumer_hang", num_partitions=8, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
instance.wait_for_log_line('kafka.*Stalled', repetitions=20)
# Pausing the broker container should make the consumer heartbeat fail,
# which triggers REBALANCE_IN_PROGRESS and can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
instance.wait_for_log_line('heartbeat error')
kafka_cluster.unpause_container('kafka1')
# print("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# The original problem showed up as the following message sequence in the librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever),
# i.e. librdkafka waited forever for the application to execute the queued rebalance callback.
# From a user perspective we expect no hanging 'drop' queries;
# 'dr'||'op' keeps the check from matching its own query text (a reusable helper sketch follows this test)
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
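# A sketch of a helper (not called by the tests in this file) that wraps the
# "no hanging DROP queries" check used by the consumer-hang tests; the name and the
# ch_instance parameter are illustrative. The 'dr'||'op' concatenation is the same
# trick used above to keep the check from matching its own query text.
def _count_hanging_drop_queries(ch_instance):
    return int(ch_instance.query(
        "select count() from system.processes where position(lower(query),'dr'||'op')>0"))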
@pytest.mark.timeout(180)
def test_kafka_consumer_hang2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="consumer_hang2", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# the first consumer subscribes to the topic, tries to poll some data, and goes idle
instance.query('SELECT * FROM test.kafka')
# the second consumer does the same, which triggers a rebalance in the first
# consumer while it tries to poll some data
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# the first consumer still has an unprocessed pending rebalance callback (there is no poll after the select);
# one of these queries used to fail because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(120)
def test_kafka_csv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_tsv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce('tsv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_select_empty(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="empty", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
kafka_produce_protobuf_messages('pb', 0, 20)
kafka_produce_protobuf_messages('pb', 20, 1)
kafka_produce_protobuf_messages('pb', 21, 29)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 21, 29)
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_schema = 'social:User';
''')
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(30)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_librdkafka_compression(kafka_cluster):
"""
Regression test for UB in snappy-c (which is used in librdkafka);
the backport PR is [1].
[1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3
Example of corruption:
2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27, e.displayText() = DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
To trigger this regression there should be duplicated messages.
The original reproducer is:
$ gcc --version |& fgrep gcc
gcc (GCC) 10.2.0
$ yes foobarbaz | fold -w 80 | head -n10 >| in-…
$ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
$ ./verify in
final comparision of in failed at 20 of 100
"""
supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']
messages = []
expected = []
value = 'foobarbaz'*10
number_of_messages = 50
for i in range(number_of_messages):
messages.append(json.dumps({'key': i, 'value': value}))
expected.append(f'{i}\t{value}')
expected = '\n'.join(expected)
for compression_type in supported_compression_types:
print(('Check compression {}'.format(compression_type)))
topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
admin_client = admin.AdminClient({'bootstrap.servers': 'localhost:9092'})
topic = admin.NewTopic(topic=topic_name, num_partitions=1, replication_factor=1, config={
'compression.type': compression_type,
})
admin_client.create_topics(new_topics=[topic], validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = 'JSONEachRow',
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
SELECT * FROM test.kafka;
'''.format(topic_name=topic_name) )
kafka_produce(topic_name, messages)
instance.wait_for_log_line("Committed offset {}".format(number_of_messages))
result = instance.query('SELECT * FROM test.consumer')
assert TSV(result) == TSV(expected)
instance.query('DROP TABLE test.kafka SYNC')
instance.query('DROP TABLE test.consumer SYNC')
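# A hedged sketch of creating a compression-enabled topic with kafka-python's
# KafkaAdminClient/NewTopic (already used by other tests in this file) instead of the
# confluent_kafka admin client used above. The helper name and topic name are
# illustrative; topic_configs is assumed to be kafka-python's way of passing
# per-topic broker configs such as compression.type.
def _create_compressed_topic_sketch(compression_type):
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    topic = NewTopic(name='librdkafka_compression_sketch_{}'.format(compression_type),
                     num_partitions=1,
                     replication_factor=1,
                     topic_configs={'compression.type': compression_type})
    admin_client.create_topics(new_topics=[topic], validate_only=False)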
@pytest.mark.timeout(180)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages of size ~100 KB
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce('flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:9092")
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
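# A sketch (not used by the test above) that factors out the offset-polling loop:
# wait until a consumer group has committed at least the given offset for a topic.
# The helper name is hypothetical; it relies on KafkaAdminClient.list_consumer_group_offsets
# the same way the loop above does, and ignores the coordinator-not-available error
# that can occur while the group is still forming.
def _wait_for_committed_offset(group_id, topic_name, target_offset):
    client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    while True:
        try:
            offsets = client.list_consumer_group_offsets(group_id)
        except kafka.errors.GroupCoordinatorNotAvailableError:
            continue
        for topic_partition, committed in list(offsets.items()):
            if topic_partition.topic == topic_name and committed.offset >= target_offset:
                return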
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume('insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
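# A sketch of the INSERT retry pattern used above (and again in the next test's insert
# threads): retry only when librdkafka reports 'Local: Timed out.' and re-raise anything
# else. The helper name and the ch_instance parameter are illustrative.
def _insert_with_retry_on_timeout(ch_instance, insert_query):
    while True:
        try:
            ch_instance.query(insert_query)
            return
        except QueryRuntimeException as e:
            if 'Local: Timed out.' not in str(e):
                raise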
@pytest.mark.timeout(240)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
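# A sketch of the background-producer pattern used in this and several later tests:
# a thread keeps producing small JSON batches until a threading.Event is set, and a
# one-element list carries the running key counter back to the caller. The helper is
# illustrative and is not wired into any test; json and threading are the modules
# already used throughout this file.
def _start_json_producer(topic_name, batch_size=100):
    cancel = threading.Event()
    counter = [0]

    def produce():
        while not cancel.is_set():
            messages = []
            for _ in range(batch_size):
                messages.append(json.dumps({'key': counter[0], 'value': counter[0]}))
                counter[0] += 1
            kafka_produce(topic_name, messages)

    thread = threading.Thread(target=produce)
    thread.start()
    # the caller is expected to cancel.set() and thread.join() when done
    return cancel, thread, counter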
@pytest.mark.timeout(180)
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
time.sleep(10)
members = describe_consumer_group('virt2')
# pprint.pprint(members)
members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(120)
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="insert3", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
while int(instance.query("SELECT count() FROM test.view")) < 5:
time.sleep(1)
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# print(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(600)
def test_kafka_flush_by_time(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="flush_by_time", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
@pytest.mark.timeout(90)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
# More flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# print(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 100 = the first poll should return 100 messages (and rows),
# without waiting for stream_flush_interval_ms
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
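# A sketch of the "first flushed part" check used above: count only the rows that landed
# in the very first part (all_1_1_0), so additional flushes happening later in the test
# cannot skew the result. The helper name and table argument are illustrative.
def _rows_in_first_part(ch_instance, table_name='view'):
    return int(ch_instance.query(
        "SELECT count() FROM test.{} WHERE _part='all_1_1_0'".format(table_name)))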
@pytest.mark.timeout(600)
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211,
kafka_flush_interval_ms = 500;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce('topic_with_multiple_partitions2', messages)
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
@pytest.mark.timeout(1200)
def test_kafka_rebalance(kafka_cluster):
NUMBER_OF_CONSURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce('topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
print(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33,
kafka_flush_interval_ms = 500;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
        # Wait for test.kafka_consumerX to start consuming ...
instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))
cancel.set()
    # Leave the last consumer working on purpose (so it finishes consuming after all the rebalances)
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS - 1):
print(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))
# print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
print(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
print((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
print(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(120)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
''')
# init PartitionManager (it starts container) earlier
pm = PartitionManager()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # The tricky part: the disconnect has to happen after the write prefix but before the write suffix.
    # sleepEachRow(0.25) * 20 rows gives a ~5 second window after "Polled batch of 20 messages"
    # while the materialized view is still working, during which we inject the ZooKeeper failure.
pm.drop_instance_zk_connections(instance)
instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while write prefix to view")
pm.heal_all()
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
@pytest.mark.timeout(120)
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
@pytest.mark.timeout(120)
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination SYNC;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # Wait for test.kafka_consumer to start consuming
instance.wait_for_log_line('Committed offset [0-9]+')
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
time.sleep(0.5)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(4)
instance.query('''
DROP TABLE test.kafka SYNC;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000,
kafka_flush_interval_ms = 1000;
''')
cancel.set()
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
print(result)
instance.query('''
DROP TABLE test.kafka_consumer SYNC;
DROP TABLE test.destination SYNC;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
@pytest.mark.timeout(120)
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce('test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.wait_for_log_line("Committed offset 20000")
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
@pytest.mark.timeout(300)
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # The tricky part: the disconnect has to happen after the write prefix but before the commit.
    # sleepEachRow(0.25) * 20 rows gives a ~5 second window after "Polled batch of 20 messages"
    # while the materialized view is still working, during which we pause the Kafka broker.
kafka_cluster.pause_container('kafka1')
    # If we restore the connection too fast (< 30 sec), librdkafka will not report any timeout
    # (an alternative would be to decrease librdkafka's default session timeouts).
    #
    # When the delay is too long (> 50 sec), the broker decides to remove us from the consumer group
    # and starts answering "Broker: Unknown member"
instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
# After https://github.com/edenhill/librdkafka/issues/2631
# timeout triggers rebalance, making further commits to the topic after getting back online
# impossible. So we have a duplicate in that scenario, but we report that situation properly.
assert TSV(result) == TSV('42\t22\t22')
# If we reach the end of a partition we repeat polling until we hit kafka_max_block_size or flush_interval.
# That behavior is a bit questionable - we could just take bigger pauses between polls instead,
# to do more work in a single pass and give the thread more rest.
# But with spiky loads on the Kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# Also, we can reach EOF because we drained librdkafka's internal queue too fast.
@pytest.mark.timeout(120)
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
    # Messages created here will be consumed immediately after MV creation,
    # reaching topic EOF.
    # But we should not flush immediately after reaching EOF, because
    # the next poll can return more data, and we should respect kafka_flush_interval_ms
    # and try to form a bigger block
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce('premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # All subscriptions/assignments were done during the earlier SELECT, so data starts flowing to
    # test.destination immediately after the MV is created
instance.wait_for_log_line("Polled batch of 1 messages")
instance.wait_for_log_line("Stalled")
# produce more messages after delay
kafka_produce('premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
instance.wait_for_log_line("Committed offset 2")
    # It should be a single part, i.e. a single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
@pytest.mark.timeout(120)
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(2000)]
kafka_produce('test_kafka_unavailable', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_kafka_unavailable',
kafka_group_name = 'test_kafka_unavailable',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.query("SELECT * FROM test.kafka")
instance.wait_for_log_line('brokers are down')
instance.wait_for_log_line('stalled. Reschedule', repetitions=2)
kafka_cluster.unpause_container('kafka1')
instance.wait_for_log_line("Committed offset 2000")
assert int(instance.query("SELECT count() FROM test.destination")) == 2000
    time.sleep(5)  # needed to give the kafka client in the python test time to recover
@pytest.mark.timeout(180)
def test_kafka_issue14202(kafka_cluster):
"""
INSERT INTO Kafka Engine from an empty SELECT sub query was leading to failure
"""
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
time.sleep(3)
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
@pytest.mark.timeout(180)
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
mod_arty_crosshair102.py
|
# -*- coding: utf-8 -*-
import datetime
import re
import os
import json
import codecs
import urllib2
import urllib
import threading
from AvatarInputHandler import control_modes
import BigWorld
from constants import AUTH_REALM
from gui.Scaleform.daapi.view.lobby.hangar.Hangar import Hangar
class Config(object):
def __init__(self):
self.enable = True
self.debug = False
self.ru = True if 'RU' in AUTH_REALM else False
self.version = 'v1.02(23.11.2015)'
self.author = 'by spoter, reven86'
self.description = 'arty_crosshair'
self.description_ru = 'Мод: "АртПрЫцел"'
self.author_ru = 'авторы: spoter, reven86'
self.name = 'arty_crosshair'
self.description_analytics = 'Мод: "АртПрЫцел"'
self.tid = 'UA-57975916-15'
self.sys_mes = {}
self.setup = {}
self._thread_analytics = None
self.analytics_started = False
self.language = None
self.xvm_installed = False
self.xvm_check()
self.res_mods = self.res_mods_init()
self.data = {}
self.default_config()
new_config = self.load_json(self.name, self.data)
self.data = new_config
if 'Русский' in self.data['config'].get('language'): self.ru = True
if self.ru:
self.description = self.description_ru
self.author = self.author_ru
@staticmethod
def res_mods_init():
wd = os.path.dirname(os.path.realpath(__file__))
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
wd = wd[0:wd.rfind('\\')]
return wd
def xvm_check(self):
try:
# noinspection PyUnresolvedReferences
import xvm_main
self.xvm_installed = True
except StandardError:
pass
def default_config(self):
self.data = {
'config': {
'enable': True, 'debug': False, 'default_status': True, 'language': 'Русский'
}, 'language': {
'Русский': {}, 'English': {}
}
}
def do_config(self):
self.enable = self.data['config'].get('enable', False)
self.debug = self.data['config'].get('debug', False)
if self.data['config'].get('language') in self.data['language']:
self.language = self.data['language'].get(self.data['config'].get('language'))
else:
self.data['config']['language'] = 'English'
self.language = self.data['language'].get('English')
def byte_ify(self, inputs):
if inputs:
if isinstance(inputs, dict):
return {self.byte_ify(key): self.byte_ify(value) for key, value in inputs.iteritems()}
elif isinstance(inputs, list):
return [self.byte_ify(element) for element in inputs]
elif isinstance(inputs, unicode):
return inputs.encode('utf-8')
else:
return inputs
return inputs
@staticmethod
def json_comments(text):
regex = r'\s*(#|\/{2}).*$'
regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$'
lines = text.split('\n')
excluded = []
for index, line in enumerate(lines):
if re.search(regex, line):
if re.search(r'^' + regex, line, re.IGNORECASE):
excluded.append(lines[index])
elif re.search(regex_inline, line):
lines[index] = re.sub(regex_inline, r'\1', line)
for line in excluded:
lines.remove(line)
return '\n'.join(lines)
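    # The two regexes above are meant to strip full-line and trailing '#' / '//' comments from the
    # config file so that the remaining text is plain JSON that json.loads() can parse (see load_json below).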
def load_json(self, name, config_old, save=False):
config_new = config_old
path = './res_mods/configs/spoter_mods/%s/' % self.name
if not os.path.exists(path):
os.makedirs(path)
new_path = '%s%s.json' % (path, name)
if save:
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
else:
if os.path.isfile(new_path):
try:
with codecs.open(new_path, 'r', encoding='utf-8-sig') as json_file:
data = self.json_comments(json_file.read().decode('utf-8-sig'))
config_new = self.byte_ify(json.loads(data))
json_file.close()
except Exception as e:
self.sys_mess()
print '%s%s' % (self.sys_mes['ERROR'], e)
else:
self.sys_mess()
print '%s[%s, %s %s]' % (self.sys_mes['ERROR'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG'])
with codecs.open(new_path, 'w', encoding='utf-8-sig') as json_file:
data = json.dumps(config_old, sort_keys=True, indent=4, ensure_ascii=False, encoding='utf-8-sig', separators=(',', ': '))
json_file.write('%s' % self.byte_ify(data))
json_file.close()
config_new = config_old
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_RECREATE_CONFIG_DONE'])
return config_new
@staticmethod
def code_pa(text):
try:
return text.encode('windows-1251')
except StandardError:
return text
def debugs(self, text):
if self.debug:
try:
text = text.encode('windows-1251')
except StandardError:
pass
print '%s%s [%s]: %s' % (datetime.datetime.now(), self.sys_mes['DEBUG'], self.code_pa(self.description), text)
def analytics_do(self):
if not self.analytics_started:
player = BigWorld.player()
param = urllib.urlencode({
'v': 1, # Version.
'tid': '%s' % self.tid, # Tracking ID / Property ID.
'cid': player.databaseID, # Anonymous Client ID.
't': 'screenview', # Screenview hit type.
'an': '%s' % self.description_analytics, # App name.
'av': '%s %s' % (self.description_analytics, self.version), # App version.
'cd': 'start [%s]' % AUTH_REALM # Screen name / content description.
})
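            # The parameters above follow the Google Analytics Measurement Protocol; the hit is
            # sent to the /collect endpoint below from a background thread (see analytics()).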
self.debugs('http://www.google-analytics.com/collect?%s' % param)
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
def analytics(self):
self._thread_analytics = threading.Thread(target=self.analytics_do, name='Thread')
self._thread_analytics.start()
def sys_mess(self):
self.sys_mes = {
'DEBUG': '[DEBUG]', 'LOAD_MOD': self.code_pa('[ЗАГРУЗКА]: ') if self.ru else '[LOAD_MOD]: ', 'INFO': self.code_pa('[ИНФО]: ') if self.ru else '[INFO]: ',
'ERROR': self.code_pa('[ОШИБКА]: ') if self.ru else '[ERROR]: ',
'MSG_RECREATE_CONFIG': self.code_pa('конфиг не найден, создаем заново') if self.ru else 'Config not found, recreating',
'MSG_RECREATE_CONFIG_DONE': self.code_pa('конфиг создан УСПЕШНО') if self.ru else 'Config recreating DONE',
'MSG_INIT': self.code_pa('применение настроек...') if self.ru else 'initialized ...', 'MSG_LANGUAGE_SET': self.code_pa('Выбран язык:') if self.ru else 'Language set to:',
'MSG_DISABLED': self.code_pa('отключен ...') if self.ru else 'disabled ...'
}
def load_mod(self):
self.do_config()
self.sys_mess()
print ''
print '%s[%s, %s]' % (self.sys_mes['LOAD_MOD'], self.code_pa(self.description), self.code_pa(self.author))
if self.enable:
self.debugs('Debug Activated ...')
print '%s[%s %s %s...]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.sys_mes['MSG_LANGUAGE_SET'], self.code_pa(self.data['config'].get('language')))
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_INIT'])
else:
print '%s[%s, %s %s]' % (self.sys_mes['INFO'], self.code_pa(self.description), self.version, self.sys_mes['MSG_DISABLED'])
print ''
class ArtyCrosshair(object):
def __init__(self):
        self.status = config.data['config'].get('default_status', True)  # default_status lives under the 'config' section
    # hook (wrapper) functions:
def hook_update_all(self):
hooked_update_all(self)
config.analytics()
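# When the mod is enabled (arty_crosshair.status is True) the is_strategic flag passed to the
# original __createGunMarker is forced to False, so the regular gun marker is used even in
# artillery (strategic) mode; otherwise the original value is kept.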
def hook_create_gun_marker(self, mode, is_strategic):
if arty_crosshair.status: status = False
else: status = is_strategic
hooked_create_gun_marker(self, mode, status)
#start mod
config = Config()
config.load_mod()
arty_crosshair = ArtyCrosshair()
#hooked
# noinspection PyProtectedMember
hooked_update_all = Hangar._Hangar__updateAll
# noinspection PyProtectedMember
hooked_create_gun_marker = control_modes._GunControlMode._GunControlMode__createGunMarker
#hook
Hangar._Hangar__updateAll = hook_update_all
# noinspection PyProtectedMember
control_modes._GunControlMode._GunControlMode__createGunMarker = hook_create_gun_marker
|
test_file.py
|
import sys
import os
import unittest
import itertools
import time
import threading
from array import array
from weakref import proxy
from test import test_support
from test.test_support import TESTFN, findfile, run_unittest
from UserList import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write('teststring')
self.assertEquals(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
softspace = f.softspace
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
# verify softspace is writable
f.softspace = softspace # merely shouldn't blow up
# verify the others aren't
for attr in 'name', 'mode', 'closed':
self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write('12')
self.f.close()
a = array('c', 'x'*10)
self.f = open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEquals('12', a.tostring()[:n])
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList(['1', '2'])
self.f.writelines(l)
self.f.close()
self.f = open(TESTFN, 'rb')
buf = self.f.read()
self.assertEquals(buf, '12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testRepr(self):
# verify repr works
self.assert_(repr(self.f).startswith("<open file '" + TESTFN))
def testErrors(self):
f = self.f
self.assertEquals(f.name, TESTFN)
self.assert_(not f.isatty())
self.assert_(not f.closed)
self.assertRaises(TypeError, f.readinto, "")
f.close()
self.assert_(f.closed)
def testMethods(self):
methods = ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', 'xreadlines', '__iter__']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assert_(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
self.assertRaises(ValueError, self.f.writelines, [])
# file is closed, __exit__ shouldn't do anything
self.assertEquals(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1/0
except:
self.assertEquals(self.f.__exit__(*sys.exc_info()), None)
class OtherFileTests(unittest.TestCase):
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
# Some invalid modes fail on Windows, but pass on Unix
# Issue3965: avoid a crash on Windows when filename is unicode
for name in (TESTFN, unicode(TESTFN), unicode(TESTFN + '\t')):
try:
f = open(name, "rr")
except IOError:
pass
else:
f.close()
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises(IOError, sys.stdin.seek, -1)
else:
print >>sys.__stdout__, (
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.')
self.assertRaises(IOError, sys.stdin.truncate)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = open(unicode(TESTFN), "w")
self.assert_(repr(f).startswith("<open file u'" + TESTFN))
f.close()
os.unlink(TESTFN)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = open(TESTFN, bad_mode)
except ValueError, msg:
if msg[0] != 0:
s = str(msg)
if s.find(TESTFN) != -1 or s.find(bad_mode) == -1:
self.fail("bad error message for invalid mode: %s" % s)
# if msg[0] == 0, we're probably on Windows where there may be
# no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = open(TESTFN, 'w', s)
f.write(str(s))
f.close()
f.close()
f = open(TESTFN, 'r', s)
d = int(f.read())
f.close()
f.close()
except IOError, msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEquals(d, s)
def testTruncateOnWindows(self):
os.unlink(TESTFN)
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = open(TESTFN, 'wb')
f.write('12345678901') # 11 bytes
f.close()
f = open(TESTFN,'rb+')
data = f.read(5)
if data != '12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods. Ostensibly, the mixture could just be tested
# to work when it should work according to the Python language,
# instead of fail when it should fail according to the current CPython
# implementation. People don't always program Python the way they
        # should, though, and the implementation might change in subtle ways,
# so we explicitly test for errors, too; the test will just have to
# be updated when the implementation changes.
dataoffset = 16384
filler = "ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
"spam, spam and eggs\n",
"eggs, spam, ham and spam\n",
"saussages, spam, spam and eggs\n",
"spam, ham, spam and eggs\n",
"spam, spam, spam, spam, spam, ham, spam\n",
"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("c", " "*100),))]
try:
# Prepare the testfile
bag = open(TESTFN, "w")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = open(TESTFN)
if f.next() != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
try:
meth(*args)
except ValueError:
pass
else:
self.fail("%s%r after next() didn't raise ValueError" %
(methodname, args))
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = open(TESTFN)
for i in range(nchunks):
f.next()
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("c", "\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tostring()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
f = open(TESTFN)
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class FileSubclassTests(unittest.TestCase):
def testExit(self):
# test that exiting with context calls subclass' close
class C(file):
def __init__(self, *args):
self.subclass_closed = False
file.__init__(self, *args)
def close(self):
self.subclass_closed = True
file.close(self)
with C(TESTFN, 'w') as f:
pass
self.failUnless(f.subclass_closed)
class FileThreadingTests(unittest.TestCase):
# These tests check the ability to call various methods of file objects
# (including close()) concurrently without crashing the Python interpreter.
# See #815646, #595601
def setUp(self):
self.f = None
self.filename = TESTFN
with open(self.filename, "w") as f:
f.write("\n".join("0123456789"))
self._count_lock = threading.Lock()
self.close_count = 0
self.close_success_count = 0
def tearDown(self):
if self.f:
try:
self.f.close()
except (EnvironmentError, ValueError):
pass
try:
os.remove(self.filename)
except EnvironmentError:
pass
def _create_file(self):
self.f = open(self.filename, "w+")
def _close_file(self):
with self._count_lock:
self.close_count += 1
self.f.close()
with self._count_lock:
self.close_success_count += 1
def _close_and_reopen_file(self):
self._close_file()
        # If close() raises an exception that's fine; self.f remains valid, so
        # we don't need to reopen.
self._create_file()
def _run_workers(self, func, nb_workers, duration=0.2):
with self._count_lock:
self.close_count = 0
self.close_success_count = 0
self.do_continue = True
threads = []
try:
for i in range(nb_workers):
t = threading.Thread(target=func)
t.start()
threads.append(t)
for _ in xrange(100):
time.sleep(duration/100)
with self._count_lock:
if self.close_count-self.close_success_count > nb_workers+1:
if test_support.verbose:
print 'Q',
break
time.sleep(duration)
finally:
self.do_continue = False
for t in threads:
t.join()
def _test_close_open_io(self, io_func, nb_workers=5):
def worker():
self._create_file()
funcs = itertools.cycle((
lambda: io_func(),
lambda: self._close_and_reopen_file(),
))
for f in funcs:
if not self.do_continue:
break
try:
f()
except (IOError, ValueError):
pass
self._run_workers(worker, nb_workers)
if test_support.verbose:
            # Useful verbose statistics when tuning this test to take
            # less time to run while making sure it is still useful.
#
# the percent of close calls that raised an error
percent = 100. - 100.*self.close_success_count/self.close_count
print self.close_count, ('%.4f ' % percent),
def test_close_open(self):
def io_func():
pass
self._test_close_open_io(io_func)
def test_close_open_flush(self):
def io_func():
self.f.flush()
self._test_close_open_io(io_func)
def test_close_open_iter(self):
def io_func():
list(iter(self.f))
self._test_close_open_io(io_func)
def test_close_open_isatty(self):
def io_func():
self.f.isatty()
self._test_close_open_io(io_func)
def test_close_open_print(self):
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_read(self):
def io_func():
self.f.read(0)
self._test_close_open_io(io_func)
def test_close_open_readinto(self):
def io_func():
a = array('c', 'xxxxx')
self.f.readinto(a)
self._test_close_open_io(io_func)
def test_close_open_readline(self):
def io_func():
self.f.readline()
self._test_close_open_io(io_func)
def test_close_open_readlines(self):
def io_func():
self.f.readlines()
self._test_close_open_io(io_func)
def test_close_open_seek(self):
def io_func():
self.f.seek(0, 0)
self._test_close_open_io(io_func)
def test_close_open_tell(self):
def io_func():
self.f.tell()
self._test_close_open_io(io_func)
def test_close_open_truncate(self):
def io_func():
self.f.truncate()
self._test_close_open_io(io_func)
def test_close_open_write(self):
def io_func():
self.f.write('')
self._test_close_open_io(io_func)
def test_close_open_writelines(self):
def io_func():
self.f.writelines('')
self._test_close_open_io(io_func)
class StdoutTests(unittest.TestCase):
def test_move_stdout_on_write(self):
# Issue 3242: sys.stdout can be replaced (and freed) during a
# print statement; prevent a segfault in this case
save_stdout = sys.stdout
class File:
def write(self, data):
if '\n' in data:
sys.stdout = save_stdout
try:
sys.stdout = File()
print "some text"
finally:
sys.stdout = save_stdout
def test_del_stdout_before_print(self):
# Issue 4597: 'print' with no argument wasn't reporting when
# sys.stdout was deleted.
save_stdout = sys.stdout
del sys.stdout
try:
print
except RuntimeError as e:
self.assertEquals(str(e), "lost sys.stdout")
else:
self.fail("Expected RuntimeError")
finally:
sys.stdout = save_stdout
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests, FileSubclassTests,
FileThreadingTests, StdoutTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
views.py
|
# -*- coding: utf-8 -*-
from multiprocessing import Process
import simplejson as json
import time
import json
import sqlparse
import wtforms_json
import os
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.shortcuts import render
from pymongo import DESCENDING
from django.views import View
from themis.utils.jsonres import temRes
from themis.utils.raiseerr import APIError
from themis.utils.wtform_models import SimpleForm, ComplexForm
from themis.themis import Themis
from themis.rule_analysis.db.mongo_operat import MongoOperat
from sql.models import Instance
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import logging
logger = logging.getLogger('default')
class BaseHandler(LoginRequiredMixin, PermissionRequiredMixin, View):
permission_required = 'sql.menu_themis'
raise_exception = True
@property
def mongo_client(self):
return MongoOperat()
class SqlReRuleSetIndex(BaseHandler):
def get(self, request):
return render(request, "rule_set.html")
class RuleSimpleAdditoin(BaseHandler):
def get(self, request):
return render(request, "rule_simple_add.html")
class RuleComplexAddition(BaseHandler):
def get(self, request):
return render(request, "rule_complex_add.html")
class RuleAddition(BaseHandler):
@temRes
def post(self, request):
"""
        Add a new rule.
"""
argument = json.loads(request.body)
wtforms_json.init()
try:
if argument["rule_complexity"] == "simple":
form = SimpleForm.from_json(argument, skip_unknown_keys=False)
elif argument["rule_complexity"] == "complex":
form = ComplexForm.from_json(argument, skip_unknown_keys=False)
if not form.validate():
message = list(form.errors.values())[0][0]
return {"errcode": 30061, "message": message}
if argument["rule_complexity"] == "complex":
filename = argument["rule_name"].lower() + ".py"
if not os.path.exists(
"../rule_analysis/rule/extend/" + filename):
return {"errcode": 30063, "message": u"需要先上传脚本"}
record = self.mongo_client.get_collection("rule").find_one({
"rule_name": argument["rule_name"].upper(),
"db_type": argument["db_type"]})
if record:
return {"errcode": 30062, "message": u"规则已经存在"}
argument["rule_name"] = argument["rule_name"].upper()
argument["solution"] = argument["rule_solution"].split("\n")
argument["max_score"] = float(argument["max_score"])
argument["weight"] = float(argument["weight"])
if argument["input_parms"]:
for index, value in enumerate(argument["input_parms"]):
argument["input_parms"][index]["parm_value"] = \
float(value["parm_value"])
self.mongo_client.get_collection("rule").insert_one(argument)
return {"errcode": 80061, "message": u"增加规则成功"}
except wtforms_json.InvalidData as e:
return {"errcode": 30060, "message": str(e)}
class RuleUpload(BaseHandler):
@temRes
def post(self, request):
"""
        Upload the script file used when adding a complex rule.
"""
f = request.FILES["pyfile"]
filename = f.name
if "." in filename:
if filename.split(".")[1] != "py":
return {"errcode": 30062, "message": u"文件类型不正确"}
else:
return {"errcode": 30062, "message": u"文件类型不正确"}
with open(BASE_DIR + "/themis/rule_analysis/rule/extend/" + filename.lower(), "wb+") as file:
for chunk in f.chunks():
file.write(chunk)
context = {
"files": [{"name": "success", "mimetype": "text"}],
"errcode": 80070,
"message": "success"
}
return context
class SqlReRuleSetInfoIndex(BaseHandler):
def get(self, request):
"""
api: /new/version/sql/review/rule/info/index
        Provides the instance (IP) list for the task publishing page.
"""
        # Fetch the list of master instances
instances = Instance.objects.filter(type='master')
return render(request, "rule_set_info.html", {'instances': instances})
class SqlReviewTaskIndex(BaseHandler):
def get(self, request):
return render(request, "task_info.html")
class SqlReviewRuleInfo(BaseHandler):
@temRes
def get(self, request):
"""
api: /sqlreview/rule/info
        Rule overview page.
"""
results = self.mongo_client.get_collection("rule").find({})
data = []
for value in results:
parms = [0, 0, 0, 0, 0]
for index, temp in enumerate(value["input_parms"]):
parms[index] = temp.get("parm_value", 0)
data.append([
value["rule_name"],
value["rule_summary"],
value["rule_status"],
value["weight"],
value["max_score"],
value["rule_type"],
"Oracle" if value["db_type"] == "O" else "Mysql",
parms[0],
parms[1],
parms[2],
parms[3],
parms[4],
value.get("exclude_obj_type", "无")
])
return {"errcode": 80013, "message": u"查询成功", "data": data}
@temRes
def post(self, request):
rule_name = request.POST.get("id")
db_type = request.POST.get("dbtype")
if db_type == "Oracle":
dbtype = "O"
elif db_type == "Mysql":
dbtype = "mysql"
else:
raise APIError(u"db类型不正确", 30055)
if not rule_name:
raise APIError(u"规则名称不正确", 30057)
flag = request.POST.get("flag")
rule_name = rule_name.split("$")[0]
record = self.mongo_client.get_collection("rule"). \
find_one({"rule_name": rule_name, "db_type": dbtype})
if not record:
raise APIError(u"没有相关规则", 30055)
if flag == "maxscore":
value = request.POST.get("value", None)
oldvalue = request.POST.get("value", None)
self.mongo_client.get_collection("rule").update_one(
{"rule_name": rule_name, "db_type": dbtype},
{"$set": {"max_score": str(value)}}
)
elif flag == "status":
value = request.POST.get("value", None)
oldvalue = request.POST.get("oldvalue", None)
if value not in ["ON", "OFF"] or oldvalue not in ["ON", "OFF"]:
raise APIError(u"状态不正确", 30054)
self.mongo_client.get_collection("rule").update_one(
{"rule_name": rule_name, "db_type": dbtype},
{"$set": {"rule_status": value}}
)
elif flag == "weight":
try:
value = float(request.POST.get("value", None))
except Exception:
raise APIError(u"设置错误", 30059)
oldvalue = request.POST.get("oldvalue", None)
self.mongo_client.get_collection("rule").update_one(
{"rule_name": rule_name, "db_type": dbtype},
{"$set": {"weight": value}}
)
elif flag in ["parm1", "parm2", "parm3", "parm4", "parm5"]:
num = int(flag[-1]) - 1
edit_parm = "input_parms." + str(num) + ".parm_value"
if len(record['input_parms']) < int(flag[-1]):
raise APIError(u"设置错误", 30055)
try:
value = float(request.POST.get("value", None))
except Exception:
raise APIError(u"设置错误", 30059)
oldvalue = request.POST.get("oldvalue", None)
self.mongo_client.get_collection("rule").update_one(
{"rule_name": rule_name, "db_type": dbtype},
{"$set": {edit_parm: value}}
)
context = {
"message": u"规则设置成功",
"errcode": 80025,
"data": value,
"olddata": oldvalue
}
return context
class SqlReviewGetStruct(BaseHandler):
@temRes
def get(self, request):
"""
api: /new/version/sql/review/get/struct
        Fetch rule information for the given instance name.
"""
flag = request.GET.get("flag")
instance_name = request.GET.get("instance_name")
db_type = Instance.objects.get(instance_name=instance_name).db_type
if flag.upper() not in ["OBJ", "SQLPLAN", "SQLSTAT", "TEXT"]:
raise APIError(u"规则类型不正确", 30058)
records = self.mongo_client.get_collection("rule").find(
{"rule_type": flag.upper(), "db_type": db_type}
)
temp = []
for value in records:
temp.append([
value["rule_name"],
value["rule_summary"],
value["rule_status"],
value["weight"],
value["max_score"],
value["rule_type"],
value["db_type"],
value.get("exclude_obj_type", "无")
])
context = {"message": u"查询成功", "errcode": 80050, "data": temp}
return context
class SqlReviewJobData(BaseHandler):
@temRes
def get(self, request):
"""
api: /new/version/sql/review/job/data
        Fetch the list of tasks with their details.
"""
sEcho = request.GET.get("sEcho", None)
start = request.GET.get("start", None)
username = request.GET.get("username", None)
operuser = request.GET.get("operuser", None)
status = request.GET.get("status", None)
if status:
if status not in ["0", "1", "2"]:
raise APIError(u"状态不正确", 30060)
starttime = request.GET.get("starttime", None)
endtime = request.GET.get("endtime", None)
if starttime and endtime:
try:
starttime = time.strftime(
"%Y-%m-%d %H:%M:%S", time.strptime(starttime, '%Y-%m-%d'))
endtime = time.strftime(
"%Y-%m-%d %H:%M:%S", time.strptime(endtime, '%Y-%m-%d'))
except Exception:
raise APIError(u"时间戳不正确", 30062)
sql = {}
if username:
sql.update({"name": {"$regex": username}})
if operuser:
sql.update({"operator_user": {"$regex": operuser}})
if status:
sql.update({"status": status})
if starttime:
sql.update({"create_time": {"$gt": starttime}})
if endtime:
sql.update({"end_time": {"$lt": endtime}})
records = self.mongo_client.get_collection("job").find(sql). \
sort("create_time", DESCENDING).skip(int(start)).limit(10)
number = self.mongo_client.get_collection("job").find(sql).count()
result = []
for value in records:
temp = {}
if value["status"] == "1":
task_status = "成功"
elif value["status"] == "0":
task_status = "失败"
else:
task_status = "正在运行"
temp.update({
"operuser": value["operator_user"],
"username": value["name"].split("#")[0],
"create_time": value["create_time"],
"status": task_status,
"task_type": value["name"].split("#")[1],
"capture_start_time": value["desc"]["capture_time_s"],
"capture_stop_time": value["desc"]["capture_time_e"],
"id": value["id"]
})
result.append(temp)
temp = {}
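        # The response uses the (legacy) jQuery DataTables server-side protocol: sEcho is echoed
        # back, iTotalRecords / iTotalDisplayRecords carry the row counts, and aaData holds the rows.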
res_data = {
"sEcho": sEcho,
"iTotalRecords": number,
"iTotalDisplayRecords": number,
"aaData": result
}
return res_data
class SqlReviewTaskRuleInfo(BaseHandler):
@temRes
def post(self, request):
"""
api:/new/version/sql/review/task/rule/info
        Fetch task details by task id.
"""
flag = request.POST.get("flag", None)
if flag == "1":
task_uuid = request.POST.get("task_uuid", None)
else:
task_uuid = request.POST.get("task_uuid", None)
if len(task_uuid) > 10:
raise APIError(u"选择任务过多", 30063)
if not task_uuid:
raise APIError(u"请选择相应任务", 30064)
rule_type = request.POST.get("rule_type", None)
if not rule_type:
raise APIError(u"任务类型不能为空", 30065)
results = self.mongo_client.get_collection("results").find_one(
{"task_uuid": task_uuid}
)
rule_info = self.mongo_client.get_collection("rule").find(
{"rule_type": rule_type.upper()}
)
if not rule_info:
raise APIError(u"任务类型不正确", 30066)
job_info = self.mongo_client.get_collection("job").find_one({"id": task_uuid})
rule_summary = {}
for value in rule_info:
rule_summary.update({
value["rule_name"]: [
value["rule_summary"],
value["exclude_obj_type"]
],
})
search_temp = {}
        # Task against a MySQL database
port = job_info["desc"]["port"]
search_temp.update({
"DB_IP": job_info["desc"]["db_ip"],
"OWNER": job_info["desc"]["owner"]
})
mysql_score = []
[mysql_score.append(float(data["max_score"]))
for data in self.mongo_client.get_collection("rule").find(
{"db_type": "mysql", "rule_type": rule_type.upper()}
)]
total_score = sum(mysql_score)
rules = []
rule_flag = ""
        # Branch on the rule type and build the corresponding response
if rule_type.upper() == "OBJ":
for key, value in results.items():
if value and isinstance(results[key], dict):
if value["records"]:
temp_set = []
# compute records value
[temp_set.append(temp[0]) for temp in value["records"]]
search_temp.update(
{"OBJECT_TYPE": rule_summary[key][1]}
)
                        # objects excluded via exclude_obj_info
prevent_obj = self.mongo_client. \
get_collection("exclude_obj_info"). \
find(search_temp)
prevent_temp = []
[prevent_temp.append(data["OBJECT_NAME"])
for data in prevent_obj]
final_set = list(set(temp_set) - set(prevent_temp))
score = value["scores"]
rules.append(
[key, rule_summary[key][0], len(final_set), score]
)
rule_flag = rule_type.upper()
elif rule_type.upper() in ["SQLPLAN", "SQLSTAT", "TEXT"]:
for key, value in results.items():
if value and isinstance(results[key], dict):
num = 0
for temp in results[key].keys():
if "#" in temp:
num += 1
rules.append(
[key, rule_summary[key][0], num, value["scores"]])
rule_flag = rule_type.upper()
context = {
"task_uuid": task_uuid,
"ip": search_temp["DB_IP"],
"port": port,
"schema": search_temp["OWNER"],
"rules": rules,
"rule_flag": rule_flag,
"total_score": total_score,
"message": u"查询成功",
"errcode": 80050
}
return context
class SqlReviewTaskRuleDetailInfo(BaseHandler):
@temRes
def post(self, request):
"""
        Fetch the detailed violations for a given task id and rule name.
"""
task_uuid = request.POST.get("task_uuid", None)
rule_name = request.POST.get("rule_name", None)
if not task_uuid:
raise APIError(u"任务id不正确", 30063)
results = self.mongo_client.get_collection("results").find_one(
{"task_uuid": task_uuid}, {rule_name: 1})
rule_info = self.mongo_client.get_collection("rule").find_one(
{"rule_name": rule_name})
job_info = self.mongo_client.get_collection("job").find_one(
{"id": task_uuid})
search_temp = {}
if job_info["desc"]["port"] == "1521":
search_temp.update({
"DB_IP": job_info["desc"]["db_ip"],
"OWNER": job_info["desc"]["owner"],
"INSTANCE_NAME": job_info["desc"]["instance_name"].upper()})
table_title = []
title = []
records = []
flag = ""
        # Take a different branch depending on the rule type
if rule_info["rule_type"] == "OBJ":
if results[rule_name]["records"]:
for data in rule_info["output_parms"]:
table_title.append(data["parm_desc"])
for data in rule_info["input_parms"]:
title.append([data["parm_value"], data["parm_desc"]])
for data in results[rule_name]["records"]:
if data not in records:
records.append(data)
flag = rule_info["rule_type"]
elif rule_info["rule_type"] in ["SQLPLAN", "SQLSTAT"]:
for key in results[rule_name].keys():
if "#" in key:
if results[rule_name][key]["obj_name"]:
obj_name = results[rule_name][key]["obj_name"]
else:
obj_name = u"空"
cost = results[rule_name][key].get("cost", None) \
if results[rule_name][key].get("cost", None) else "空"
if results[rule_name][key].get("stat"):
count = results[rule_name][key].get("stat").get("ts_cnt", u"空")
else:
count = "空"
records.append([
key.split("#")[0],
results[rule_name][key]["sql_text"],
key.split("#")[1],
key.split("#")[2],
obj_name,
cost,
count
])
flag = rule_info["rule_type"]
title = rule_name
table_title = ""
elif rule_info["rule_type"] == "TEXT":
for key in results[rule_name].keys():
if "#" in key:
if len(results[rule_name][key]["sql_text"]) > 40:
sqltext = results[rule_name][key]["sql_text"][:40]
else:
sqltext = results[rule_name][key]["sql_text"]
records.append([key.split("#")[0], sqltext])
flag = rule_info["rule_type"]
title = rule_name
table_title = ""
solution = "<br>".join(rule_info["solution"])
context = {
"task_uuid": task_uuid,
"title": title,
"table_title": table_title,
"rule_name": rule_name,
"records": records,
"flag": flag,
"solution": solution,
"message": u"查询成功",
"errcode": 80051
}
return context
class SqlReviewTaskRulePlanInfo(BaseHandler):
@temRes
def post(self, request):
"""
        Fetch execution statistics and SQL text by task id, rule name and hash value.
"""
argument = json.loads(request.body)
task_uuid = argument.get("task_uuid", None)
sql_id_hash = argument.get("sql_id_hash", None)
rule_name = argument.get("rule_name", None)
if not task_uuid:
raise APIError(u"任务id不正确", 30063)
plan = []
sql_id = argument.get("id", None)
if sql_id == "v":
search_key = rule_name + "." + sql_id_hash + "#v"
keyword = sql_id_hash + "#v"
else:
search_key = rule_name + "." + sql_id_hash + "#" + sql_id
keyword = sql_id_hash + "#" + sql_id
record = self.mongo_client.get_collection("results").find_one(
{"task_uuid": str(task_uuid)}, {str(search_key): 1}
)
task_info = self.mongo_client.get_collection("job").find_one(
{"id": str(task_uuid)}
)
stat_title = []
stat_data = []
obj_title = []
obj_data = []
if record[rule_name][keyword]["stat"]:
temp = record[rule_name][keyword]["stat"]
stat_title = temp.keys()
stat_data = temp.values()
sql_fulltext = sqlparse.format(
record[rule_name][keyword]["sql_fulltext"], reindent=True
)
if int(task_info["desc"]["port"]) == 1521:
flag = "O"
if record[rule_name][keyword]["obj_info"]:
temp = record[rule_name][keyword]["obj_info"]
obj_title = temp.keys()
[obj_data.append(str(data)) for data in temp.values()]
plan = record[rule_name][keyword]["plan"]
                # Sort the execution plan rows by ID
plan.sort(key=lambda x: x['ID'])
else:
obj_title = []
obj_data = []
flag = "mysql"
plan = record[rule_name][keyword]["plan"]
context = {
"sql_fulltext": sql_fulltext,
"plan": plan,
"obj_title": obj_title,
"obj_data": [obj_data],
"stat_title": list(stat_title),
"stat_data": [list(stat_data)],
"flag": flag,
"message": u"查询成功",
"errcode": 80054
}
return context
class SqlReviewTaskRuleTextInfo(BaseHandler):
@temRes
def post(self, request):
"""
        Fetch the details for a TEXT-type rule.
"""
argument = json.loads(request.body)
task_uuid = argument.get("task_uuid", None)
sql_id_hash = argument.get("sql_id_hash", None)
rule_name = argument.get("rule_name", None)
if not task_uuid:
raise APIError(u"任务id不正确", 30063)
sql_id = sql_id_hash + "#v"
search_key = rule_name + "." + sql_id
record = self.mongo_client.get_collection("results"). \
find_one(
{"task_uuid": str(task_uuid)},
{str(search_key): 1}
)
sqltext = sqlparse.format(
record[rule_name][sql_id]["sql_text"], reindent=True
)
sqlstat = record[rule_name][sql_id]["stat"]
stat_title = []
stat_list = []
for index, value in enumerate(sqlstat):
if index == 0:
stat_title = value.keys()
temp = []
[temp.append(str(data)) for data in value.values()]
stat_list.append(temp)
context = {
"message": u"查询成功",
"errcode": 80055,
"sqltext": sqltext,
"stat_title": list(stat_title),
"stat_list": stat_list,
"sql_id": sql_id.split("#")[0]
}
return context
class SqlReviewPreventObject(BaseHandler):
@temRes
def get(self, request):
records = self.mongo_client.get_collection("exclude_obj_info").find({})
temp = []
if records:
for value in records:
if value["DB_TYPE"] == "O":
db_type = "oracle"
else:
db_type = "mysql"
if value.get("INSTANCE_NAME", None):
port_or_name = value["INSTANCE_NAME"]
else:
port_or_name = value["port"]
temp.append([
value["OWNER"],
value["OBJECT_NAME"],
value["OBJECT_TYPE"],
db_type, value["DB_IP"],
port_or_name, value["OBJECT_NAME"]
])
context = {"message": u"查询成功", "errcode": 80052, "data": temp}
return context
def post(self, request):
pass
class SqlReviewTaskPublish(BaseHandler):
@staticmethod
def run(**kwargs):
"""
        Run the SQL review task.
:return:
"""
themis = Themis(instance_name=kwargs.get('instance_name'),
username=kwargs.get('username'),
rule_type=kwargs.get('rule_type').upper(),
start_date=kwargs.get('start_date'),
stop_date=kwargs.get('stop_date'),
create_user=kwargs.get('create_user'))
job_record = themis.run()
themis.save_result(job_record)
@temRes
def post(self, request):
kwargs = {
'instance_name': request.POST.get('instance_name'),
'username': request.POST.get('db_name'),
'rule_type': request.POST.get('rule_type'),
'start_date': request.POST.get('start_date'),
'stop_date': request.POST.get('stop_date'),
'create_user': request.user.display
}
p = Process(target=self.run, kwargs=kwargs)
p.start()
context = {
"errcode": 80058,
"message": u"任务发布成功"
}
return context
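# Design note: post() above returns immediately because the (potentially
# long-running) rule analysis is handed off to a separate OS process. A minimal,
# generic sketch of the same fire-and-forget pattern (names here are
# illustrative, not part of this module):
#
#   from multiprocessing import Process
#
#   def run_job(**kwargs):
#       ...  # long-running analysis
#
#   Process(target=run_job, kwargs={"instance_name": "orcl"}).start()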
|
chatcommunicate.py
|
import random
from threading import Thread, Lock
from parsing import *
from datahandling import *
from metasmoke import Metasmoke
from globalvars import GlobalVars
import os
import re
import time
import requests
from datetime import datetime
from utcdate import UtcDate
from apigetpost import api_get_post
from spamhandling import handle_spam, handle_user_with_all_spam
from termcolor import colored
from findspam import FindSpam
from deletionwatcher import DeletionWatcher
from ChatExchange.chatexchange.messages import Message
# Please note: If new !!/ commands are added or existing ones are modified, don't forget to
# update the wiki at https://github.com/Charcoal-SE/SmokeDetector/wiki/Commands.
add_latest_message_lock = Lock()
def post_message_in_room(room_id_str, msg, length_check=True):
if room_id_str == GlobalVars.charcoal_room_id:
GlobalVars.charcoal_hq.send_message(msg, length_check)
elif room_id_str == GlobalVars.meta_tavern_room_id:
GlobalVars.tavern_on_the_meta.send_message(msg, length_check)
elif room_id_str == GlobalVars.socvr_room_id:
GlobalVars.socvr.send_message(msg, length_check)
def is_smokedetector_message(user_id, room_id):
return user_id == GlobalVars.smokeDetector_user_id[room_id]
def add_to_listen_if_edited(host, message_id):
if host + str(message_id) not in GlobalVars.listen_to_these_if_edited:
GlobalVars.listen_to_these_if_edited.append(host + str(message_id))
if len(GlobalVars.listen_to_these_if_edited) > 500:
GlobalVars.listen_to_these_if_edited = GlobalVars.listen_to_these_if_edited[-500:]
def print_chat_message(ev):
message = colored("Chat message in " + ev.data["room_name"] + " (" + str(ev.data["room_id"]) + "): \"", attrs=['bold'])
message += ev.data['content']
message += "\""
print message + colored(" - " + ev.data['user_name'], attrs=['bold'])
def special_room_watcher(ev, wrap2):
if ev.type_id != 1:
return
ev_user_id = str(ev.data["user_id"])
content_source = ev.message.content_source
if is_smokedetector_message(ev_user_id, GlobalVars.charcoal_room_id):
post_site_id = fetch_post_id_and_site_from_msg_content(content_source)
post_url = fetch_post_url_from_msg_content(content_source)
if post_site_id is not None and post_url is not None:
t_check_websocket = Thread(target=DeletionWatcher.check_if_report_was_deleted, args=(post_site_id, post_url, ev.message))
t_check_websocket.daemon = True
t_check_websocket.start()
def watcher(ev, wrap2):
if ev.type_id != 1 and ev.type_id != 2:
return
if ev.type_id == 2 and (wrap2.host + str(ev.message.id)) not in GlobalVars.listen_to_these_if_edited:
return
print_chat_message(ev)
ev_room = str(ev.data["room_id"])
ev_user_id = str(ev.data["user_id"])
ev_room_name = ev.data["room_name"].encode('utf-8')
if ev.type_id == 2:
ev.message = Message(ev.message.id, wrap2)
content_source = ev.message.content_source
message_id = ev.message.id
if is_smokedetector_message(ev_user_id, ev_room):
add_latest_message_lock.acquire()
add_latest_smokedetector_message(ev_room, message_id)
add_latest_message_lock.release()
post_site_id = fetch_post_id_and_site_from_msg_content(content_source)
post_url = fetch_post_url_from_msg_content(content_source)
if post_site_id is not None and (ev_room == GlobalVars.meta_tavern_room_id or ev_room == GlobalVars.socvr_room_id):
t_check_websocket = Thread(target=DeletionWatcher.check_if_report_was_deleted, args=(post_site_id, post_url, ev.message))
t_check_websocket.daemon = True
t_check_websocket.start()
message_parts = re.split('[ ,]+', content_source)
ev_user_name = ev.data["user_name"]
ev_user_link = "//chat.{host}/users/{user_id}".format(host=wrap2.host, user_id=ev.user.id)
if ev_user_name != "SmokeDetector":
GlobalVars.users_chatting[ev_room].append((ev_user_name, ev_user_link))
shortcut_messages = []
if message_parts[0].lower() == "sd":
message_parts = preprocess_shortcut_command(content_source).split(" ")
latest_smokedetector_messages = GlobalVars.latest_smokedetector_messages[ev_room]
commands = message_parts[1:]
if len(latest_smokedetector_messages) == 0:
ev.message.reply("I don't have any messages posted after the latest reboot.")
return
if len(commands) > len(latest_smokedetector_messages):
ev.message.reply("I've only posted {} messages since the latest reboot; that's not enough to execute all commands. No commands were executed.".format(len(latest_smokedetector_messages)))
return
for i in xrange(0, len(commands)):
shortcut_messages.append(u":{message} {command_name}".format(message=latest_smokedetector_messages[-(i + 1)], command_name=commands[i]))
reply = ""
amount_none = 0
amount_skipped = 0
amount_unrecognized = 0
length = len(shortcut_messages)
for i in xrange(0, length):
current_message = shortcut_messages[i]
if length > 1:
reply += str(i + 1) + ". "
reply += u"[{0}] ".format(current_message.split(" ")[0])
if current_message.split(" ")[1] != "-":
result = handle_commands(current_message.lower(), current_message.split(" "), ev_room, ev_room_name, ev_user_id, ev_user_name, wrap2, current_message, message_id)
r = result
if type(result) == tuple:
result = result[1]
if result is not None and result is not False:
reply += result + os.linesep
elif result is None:
reply += "<processed without return value>" + os.linesep
amount_none += 1
elif result is False or r[0] is False:
reply += "<unrecognized command>" + os.linesep
amount_unrecognized += 1
else:
reply += "<skipped>" + os.linesep
amount_skipped += 1
if amount_unrecognized == length:
add_to_listen_if_edited(wrap2.host, message_id)
if amount_none + amount_skipped + amount_unrecognized == length:
reply = ""
reply = reply.strip()
if reply != "":
message_with_reply = u":{} {}".format(message_id, reply)
if len(message_with_reply) <= 500 or "\n" in reply:
ev.message.reply(reply, False)
else:
result = handle_commands(content_source.lower(), message_parts, ev_room, ev_room_name, ev_user_id, ev_user_name, wrap2, content_source, message_id)
if type(result) != tuple:
result = (True, result)
if result[1] is not None:
if wrap2.host + str(message_id) in GlobalVars.listen_to_these_if_edited:
GlobalVars.listen_to_these_if_edited.remove(wrap2.host + str(message_id))
message_with_reply = u":{} {}".format(message_id, result[1])
if len(message_with_reply) <= 500 or "\n" in result[1]:
ev.message.reply(result[1], False)
if result[0] is False:
add_to_listen_if_edited(wrap2.host, message_id)
def handle_commands(content_lower, message_parts, ev_room, ev_room_name, ev_user_id, ev_user_name, wrap2, content, message_id):
if content_lower.startswith("!!/parse") \
and is_privileged(ev_room, ev_user_id, wrap2):
string_to_parse = content[9:]
print string_to_parse
response = requests.get("http://localhost:8000/?q=" + string_to_parse)
print response.text
GlobalVars.charcoal_hq.send_message(' ' + ('\n ').join(response.text.split('\n')), False)
return
message_url = "//chat.{host}/transcript/message/{id}#{id}".format(host=wrap2.host, id=message_id)
second_part_lower = "" if len(message_parts) < 2 else message_parts[1].lower()
if second_part_lower in ["f", "notspam"]:
second_part_lower = "fp-"
if second_part_lower in ["k", "spam", "rude", "abuse", "abusive", "offensive"]:
second_part_lower = "tpu-"
if second_part_lower == "n":
second_part_lower = "naa-"
if re.compile("^:[0-9]{4,}$").search(message_parts[0]):
msg_id = int(message_parts[0][1:])
msg = wrap2.get_message(msg_id)
msg_content = msg.content_source
quiet_action = ("-" in second_part_lower)
if str(msg.owner.id) != GlobalVars.smokeDetector_user_id[ev_room] or msg_content is None:
return
post_url = fetch_post_url_from_msg_content(msg_content)
post_site_id = fetch_post_id_and_site_from_msg_content(msg_content)
if post_site_id is not None:
post_type = post_site_id[2]
else:
post_type = None
if (second_part_lower.startswith("false") or second_part_lower.startswith("fp")) \
and is_privileged(ev_room, ev_user_id, wrap2):
if post_site_id is None:
return "That message is not a report."
t_metasmoke = Thread(target=Metasmoke.send_feedback_for_post,
args=(post_url, second_part_lower, ev_user_name, ev_user_id, ))
t_metasmoke.start()
add_false_positive((post_site_id[0], post_site_id[1]))
user_added = False
user_removed = False
url_from_msg = fetch_owner_url_from_msg_content(msg_content)
user = None
if url_from_msg is not None:
user = get_user_from_url(url_from_msg)
if second_part_lower.startswith("falseu") or second_part_lower.startswith("fpu"):
if user is not None:
add_whitelisted_user(user)
user_added = True
if "Blacklisted user:" in msg_content:
if user is not None:
remove_blacklisted_user(user)
user_removed = True
if post_type == "question":
if user_added and not quiet_action:
return "Registered question as false positive and whitelisted user."
elif user_removed and not quiet_action:
return "Registered question as false positive and removed user from the blacklist."
elif not quiet_action:
return "Registered question as false positive."
elif post_type == "answer":
if user_added and not quiet_action:
return "Registered answer as false positive and whitelisted user."
elif user_removed and not quiet_action:
return "Registered answer as false positive and removed user from the blacklist."
elif not quiet_action:
return "Registered answer as false positive."
try:
msg.delete()
except:
pass
if (second_part_lower.startswith("true") or second_part_lower.startswith("tp")) \
and is_privileged(ev_room, ev_user_id, wrap2):
if post_site_id is None:
return "That message is not a report."
t_metasmoke = Thread(target=Metasmoke.send_feedback_for_post,
args=(post_url, second_part_lower, ev_user_name, ev_user_id, ))
t_metasmoke.start()
user_added = False
if second_part_lower.startswith("trueu") or second_part_lower.startswith("tpu"):
url_from_msg = fetch_owner_url_from_msg_content(msg_content)
if url_from_msg is not None:
user = get_user_from_url(url_from_msg)
if user is not None:
add_blacklisted_user(user, message_url, "http:" + post_url)
user_added = True
if post_type == "question":
if quiet_action:
return None
if user_added:
return "Blacklisted user and registered question as true positive."
return "Recorded question as true positive in metasmoke. Use `tpu` or `trueu` if you want to blacklist a user."
elif post_type == "answer":
if quiet_action:
return None
if user_added:
return "Blacklisted user."
return "Recorded answer as true positive in metasmoke. If you want to blacklist the poster of the answer, use `trueu` or `tpu`."
if second_part_lower.startswith("ignore") and is_privileged(ev_room, ev_user_id, wrap2):
if post_site_id is None:
return "That message is not a report."
t_metasmoke = Thread(target=Metasmoke.send_feedback_for_post,
args=(post_url, second_part_lower, ev_user_name, ev_user_id,))
t_metasmoke.start()
add_ignored_post(post_site_id[0:2])
if not quiet_action:
return "Post ignored; alerts about it will no longer be posted."
else:
return None
if second_part_lower.startswith("naa") and is_privileged(ev_room, ev_user_id, wrap2):
if post_site_id is None:
return "That message is not a report."
if post_type != "answer":
return "That report was a question; questions cannot be marked as NAAs."
t_metasmoke = Thread(target=Metasmoke.send_feedback_for_post,
args=(post_url, second_part_lower, ev_user_name, ev_user_id, ))
t_metasmoke.start()
add_ignored_post(post_site_id[0:2])
if quiet_action:
return None
return "Recorded answer as an NAA in metasmoke."
if (second_part_lower.startswith("delete") or second_part_lower.startswith("remove") or second_part_lower.startswith("gone") or second_part_lower.startswith("poof") or second_part_lower == "del") and is_privileged(ev_room, ev_user_id, wrap2):
try:
msg.delete()
except:
pass # couldn't delete message
if second_part_lower.startswith("postgone") and is_privileged(ev_room, ev_user_id, wrap2):
edited = edited_message_after_postgone_command(msg_content)
if edited is None:
return "That's not a report."
msg.edit(edited)
return None
if second_part_lower.startswith("why"):
post_info = fetch_post_id_and_site_from_msg_content(msg_content)
if post_info is None:
post_info = fetch_user_from_allspam_report(msg_content)
if post_info is None:
return "That's not a report."
why = get_why_allspam(post_info)
                if why is not None and why != "":
return why
else:
post_id, site, _ = post_info
why = get_why(site, post_id)
                if why is not None and why != "":
return why
return "There is no `why` data for that user (anymore)."
if content_lower.startswith("!!/addblu") \
and is_privileged(ev_room, ev_user_id, wrap2):
uid, val = get_user_from_list_command(content_lower)
if uid > -1 and val != "":
add_blacklisted_user((uid, val), message_url, "")
return "User blacklisted (`{}` on `{}`).".format(uid, val)
elif uid == -2:
return "Error: {}".format(val)
else:
return "Invalid format. Valid format: `!!/addblu profileurl` *or* `!!/addblu userid sitename`."
if content_lower.startswith("!!/rmblu") \
and is_privileged(ev_room, ev_user_id, wrap2):
uid, val = get_user_from_list_command(content_lower)
if uid > -1 and val != "":
if remove_blacklisted_user((uid, val)):
return "User removed from blacklist (`{}` on `{}`).".format(uid, val)
else:
return "User is not blacklisted."
elif uid == -2:
return "Error: {}".format(val)
else:
return False, "Invalid format. Valid format: `!!/rmblu profileurl` *or* `!!/rmblu userid sitename`."
if content_lower.startswith("!!/isblu"):
uid, val = get_user_from_list_command(content_lower)
if uid > -1 and val != "":
if is_blacklisted_user((uid, val)):
return "User is blacklisted (`{}` on `{}`).".format(uid, val)
else:
return "User is not blacklisted (`{}` on `{}`).".format(uid, val)
elif uid == -2:
return "Error: {}".format(val)
else:
return False, "Invalid format. Valid format: `!!/isblu profileurl` *or* `!!/isblu userid sitename`."
if content_lower.startswith("!!/addwlu") \
and is_privileged(ev_room, ev_user_id, wrap2):
uid, val = get_user_from_list_command(content_lower)
if uid > -1 and val != "":
add_whitelisted_user((uid, val))
return "User whitelisted (`{}` on `{}`).".format(uid, val)
elif uid == -2:
return "Error: {}".format(val)
else:
return False, "Invalid format. Valid format: `!!/addwlu profileurl` *or* `!!/addwlu userid sitename`."
if content_lower.startswith("!!/rmwlu") \
and is_privileged(ev_room, ev_user_id, wrap2):
uid, val = get_user_from_list_command(content_lower)
        if uid > -1 and val != "":
if remove_whitelisted_user((uid, val)):
return "User removed from whitelist (`{}` on `{}`).".format(uid, val)
else:
return "User is not whitelisted."
elif uid == -2:
return "Error: {}".format(val)
else:
return False, "Invalid format. Valid format: `!!/rmwlu profileurl` *or* `!!/rmwlu userid sitename`."
if content_lower.startswith("!!/iswlu"):
uid, val = get_user_from_list_command(content_lower)
if uid > -1 and val != "":
if is_whitelisted_user((uid, val)):
return "User is whitelisted (`{}` on `{}`).".format(uid, val)
else:
return "User is not whitelisted (`{}` on `{}`).".format(uid, val)
elif uid == -2:
return "Error: {}".format(val)
else:
return False, "Invalid format. Valid format: `!!/iswlu profileurl` *or* `!!/iswlu userid sitename`."
if (content_lower.startswith("!!/allspam") or content_lower.startswith("!!/reportuser")) and is_privileged(ev_room, ev_user_id, wrap2):
if len(message_parts) != 2:
return False, "1 argument expected"
url = message_parts[1]
user = get_user_from_url(url)
if user is None:
return "That doesn't look like a valid user URL."
why = u"User manually reported by *{}* in room *{}*.\n".format(ev_user_name, ev_room_name.decode('utf-8'))
handle_user_with_all_spam(user, why)
return None
if content_lower.startswith("!!/report") \
and is_privileged(ev_room, ev_user_id, wrap2):
crn, wait = can_report_now(ev_user_id, wrap2.host)
if not crn:
return "You can execute the !!/report command again in {} seconds. " \
"To avoid one user sending lots of reports in a few commands and slowing SmokeDetector down due to rate-limiting, " \
"you have to wait 30 seconds after you've reported multiple posts using !!/report, even if your current command just has one URL. " \
"(Note that this timeout won't be applied if you only used !!/report for one post)".format(wait)
if len(message_parts) < 2:
return False, "Not enough arguments."
output = []
index = 0
urls = list(set(message_parts[1:]))
if len(urls) > 5:
return False, "To avoid SmokeDetector reporting posts too slowly, " \
"you can report at most 5 posts at a time. " \
"This is to avoid SmokeDetector's chat messages getting rate-limited too much, " \
"which would slow down reports."
for url in urls:
index += 1
post_data = api_get_post(url)
if post_data is None:
output.append("Post {}: That does not look like a valid post URL.".format(index))
continue
if post_data is False:
output.append("Post {}: Could not find data for this post in the API. It may already have been deleted.".format(index))
continue
user = get_user_from_url(post_data.owner_url)
if user is not None:
add_blacklisted_user(user, message_url, post_data.post_url)
why = u"Post manually reported by user *{}* in room *{}*.\n".format(ev_user_name, ev_room_name.decode('utf-8'))
batch = ""
if len(urls) > 1:
batch = " (batch report: post {} out of {})".format(index, len(urls))
handle_spam(post_data.title, post_data.body, post_data.owner_name, post_data.site, post_data.post_url,
post_data.owner_url, post_data.post_id, ["Manually reported " + post_data.post_type + batch],
post_data.post_type == "answer", why, post_data.owner_rep, post_data.score, post_data.up_vote_count,
post_data.down_vote_count, post_data.question_id)
if 1 < len(urls) > len(output):
add_or_update_multiple_reporter(ev_user_id, wrap2.host, time.time())
if len(output) > 0:
return os.linesep.join(output)
return None
if content_lower.startswith("!!/wut"):
return "Whaddya mean, 'wut'? Humans..."
if content_lower.startswith("!!/lick"):
return "*licks ice cream cone*"
if content_lower.startswith("!!/alive"):
if ev_room == GlobalVars.charcoal_room_id:
return 'Of course'
elif ev_room == GlobalVars.meta_tavern_room_id or ev_room == GlobalVars.socvr_room_id:
return random.choice(['Yup', 'You doubt me?', 'Of course', '... did I miss something?', 'plz send teh coffee', 'Watching this endless list of new questions *never* gets boring', 'Kinda sorta'])
if content_lower.startswith("!!/rev") or content_lower.startswith("!!/ver"):
return '[{commit_name}](https://github.com/Charcoal-SE/SmokeDetector/commit/{commit_code})'.format(commit_name=GlobalVars.commit_with_author, commit_code=GlobalVars.commit)
if content_lower.startswith("!!/status"):
now = datetime.utcnow()
diff = now - UtcDate.startup_utc_date
minutes, remainder = divmod(diff.seconds, 60)
minute_str = "minutes" if minutes != 1 else "minute"
return 'Running since {time} UTC ({minute_count} {plurality})'.format(time=GlobalVars.startup_utc, minute_count=minutes, plurality=minute_str)
if content_lower.startswith("!!/reboot"):
if is_privileged(ev_room, ev_user_id, wrap2):
post_message_in_room(ev_room, "Goodbye, cruel world")
os._exit(5)
if content_lower.startswith("!!/stappit"):
if is_privileged(ev_room, ev_user_id, wrap2):
post_message_in_room(ev_room, "Goodbye, cruel world")
os._exit(6)
if content_lower.startswith("!!/master"):
if is_privileged(ev_room, ev_user_id, wrap2):
os._exit(8)
if content_lower.startswith("!!/clearbl"):
if is_privileged(ev_room, ev_user_id, wrap2):
if os.path.isfile("blacklistedUsers.txt"):
os.remove("blacklistedUsers.txt")
GlobalVars.blacklisted_users = []
return "Kaboom, blacklisted users cleared."
return "There are no blacklisted users at the moment."
if content_lower.startswith("!!/block") and is_privileged(ev_room, ev_user_id, wrap2):
room_id = message_parts[2] if len(message_parts) > 2 else "all"
timeToBlock = message_parts[1] if len(message_parts) > 1 else "0"
if not timeToBlock.isdigit():
return False, "Invalid duration."
timeToBlock = int(timeToBlock)
timeToBlock = timeToBlock if 0 < timeToBlock < 14400 else 900
GlobalVars.blockedTime[room_id] = time.time() + timeToBlock
which_room = "globally" if room_id == "all" else "in room " + room_id
report = "Reports blocked for {} seconds {}.".format(timeToBlock, which_room)
if room_id != GlobalVars.charcoal_room_id:
GlobalVars.charcoal_hq.send_message(report)
return report
if content_lower.startswith("!!/unblock") and is_privileged(ev_room, ev_user_id, wrap2):
room_id = message_parts[2] if len(message_parts) > 2 else "all"
GlobalVars.blockedTime[room_id] = time.time()
which_room = "globally" if room_id == "all" else "in room " + room_id
report = "Reports unblocked {}.".format(GlobalVars.blockedTime - time.time(), which_room)
if room_id != GlobalVars.charcoal_room_id:
GlobalVars.charcoal_hq.send_message(report)
return report
if content_lower.startswith("!!/errorlogs"):
if is_privileged(ev_room, ev_user_id, wrap2):
count = -1
if len(message_parts) != 2:
return "The !!/errorlogs command requires 1 argument."
try:
count = int(message_parts[1])
except ValueError:
pass
if count == -1:
return "Invalid argument."
logs_part = fetch_lines_from_error_log(count)
post_message_in_room(ev_room, logs_part, False)
if content_lower.startswith("!!/pull"):
if is_privileged(ev_room, ev_user_id, wrap2):
request = requests.get('https://api.github.com/repos/Charcoal-SE/SmokeDetector/git/refs/heads/master')
latest_sha = request.json()["object"]["sha"]
request = requests.get('https://api.github.com/repos/Charcoal-SE/SmokeDetector/commits/{commit_code}/statuses'.format(commit_code=latest_sha))
states = []
for status in request.json():
state = status["state"]
states.append(state)
if "success" in states:
os._exit(3)
elif "error" in states or "failure" in states:
return "CI build failed! :( Please check your commit."
elif "pending" in states or not states:
return "CI build is still pending, wait until the build has finished and then pull again."
if content_lower.startswith("!!/help") or content_lower.startswith("!!/info"):
return "I'm [SmokeDetector](https://github.com/Charcoal-SE/SmokeDetector), a bot "\
"that detects spam and offensive posts on the network and posts alerts to chat. "\
"[A command list is available here](https://github.com/Charcoal-SE/SmokeDetector/wiki/Commands)."
if content_lower.startswith("!!/apiquota"):
return "The current API quota remaining is {}.".format(GlobalVars.apiquota)
if content_lower.startswith("!!/whoami"):
if (ev_room in GlobalVars.smokeDetector_user_id):
return "My id for this room is {}.".format(GlobalVars.smokeDetector_user_id[ev_room])
return "I don't know my user ID for this room. (Something is wrong, and it's apnorton's fault.)"
if content_lower.startswith("!!/location"):
return GlobalVars.location
if content_lower.startswith("!!/queuestatus"):
post_message_in_room(ev_room, GlobalVars.bodyfetcher.print_queue(), False)
if content_lower.startswith("!!/blame"):
GlobalVars.users_chatting[ev_room] = list(set(GlobalVars.users_chatting[ev_room])) # Make unique
user_to_blame = random.choice(GlobalVars.users_chatting[ev_room])
return u"It's [{}]({})'s fault.".format(user_to_blame[0], user_to_blame[1])
if content_lower.startswith("!!/coffee"):
return "*brews coffee for @" + ev_user_name.replace(" ", "") + "*"
if content_lower.startswith("!!/tea"):
return "*brews a cup of {choice} tea for @{user}*".format(choice=random.choice(['earl grey', 'green', 'chamomile', 'lemon', 'darjeeling', 'mint', 'jasmine']), user=ev_user_name.replace(" ", ""))
if content_lower.startswith("!!/brownie"):
return "Brown!"
if content_lower.startswith("!!/hats"):
wb_end = datetime(2016, 1, 4, 0, 0, 0)
now = datetime.utcnow()
if wb_end > now:
diff = wb_end - now
hours, remainder = divmod(diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
daystr = "days" if diff.days != 1 else "day"
hourstr = "hours" if hours != 1 else "hour"
minutestr = "minutes" if minutes != 1 else "minute"
secondstr = "seconds" if seconds != 1 else "second"
return "HURRY UP AND EARN MORE HATS! Winterbash will be over in {} {}, {} {}, {} {}, and {} {}. :(".format(diff.days, daystr, hours, hourstr, minutes, minutestr, seconds, secondstr)
return "Winterbash is over. :("
if content_lower.startswith("!!/test"):
string_to_test = content[8:]
test_as_answer = False
if content_lower.startswith("!!/test-a"):
string_to_test = content[10:]
test_as_answer = True
if len(string_to_test) == 0:
return "Nothing to test"
result = "> "
reasons, why = FindSpam.test_post(string_to_test, string_to_test, string_to_test, "", test_as_answer, False, 1, 0)
if len(reasons) == 0:
result += "Would not be caught for title, {}, and username.".format("answer" if test_as_answer else "body")
return result
result += ", ".join(reasons).capitalize()
if why is not None and len(why) > 0:
result += "\n----------\n"
result += why
return result
if content_lower.startswith("!!/amiprivileged"):
if is_privileged(ev_room, ev_user_id, wrap2):
return "Yes, you are a privileged user."
return "No, you are not a privileged user."
if content_lower.startswith("!!/notify"):
if len(message_parts) != 3:
return False, "2 arguments expected"
user_id = int(ev_user_id)
chat_site = wrap2.host
room_id = message_parts[1]
if not room_id.isdigit():
return False, "Room ID is invalid."
room_id = int(room_id)
quiet_action = ("-" in message_parts[2])
se_site = message_parts[2].replace('-', '')
response, full_site = add_to_notification_list(user_id, chat_site, room_id, se_site)
if response == 0:
if quiet_action:
return None
return "You'll now get pings from me if I report a post on `{site_name}`, in room `{room_id}` on `chat.{chat_domain}`".format(site_name=se_site, room_id=room_id, chat_domain=chat_site)
elif response == -1:
return "That notification configuration is already registered."
elif response == -2:
return False, "The given SE site does not exist."
if content_lower.startswith("!!/unnotify"):
if len(message_parts) != 3:
return False, "2 arguments expected"
user_id = int(ev_user_id)
chat_site = wrap2.host
room_id = message_parts[1]
if not room_id.isdigit():
return False, "Room ID is invalid."
room_id = int(room_id)
quiet_action = ("-" in message_parts[2])
se_site = message_parts[2].replace('-', '')
response = remove_from_notification_list(user_id, chat_site, room_id, se_site)
if response:
if quiet_action:
return None
return "I will no longer ping you if I report a post on `{site_name}`, in room `{room_id}` on `chat.{chat_domain}`".format(site_name=se_site, room_id=room_id, chat_domain=chat_site)
return "That configuration doesn't exist."
if content_lower.startswith("!!/willibenotified"):
if len(message_parts) != 3:
return False, "2 arguments expected"
user_id = int(ev_user_id)
chat_site = wrap2.host
room_id = message_parts[1]
if not room_id.isdigit():
return False, "Room ID is invalid"
room_id = int(room_id)
se_site = message_parts[2]
will_be_notified = will_i_be_notified(user_id, chat_site, room_id, se_site)
if will_be_notified:
return "Yes, you will be notified for that site in that room."
return "No, you won't be notified for that site in that room."
if content_lower.startswith("!!/allnotificationsites"):
if len(message_parts) != 2:
return False, "1 argument expected"
user_id = int(ev_user_id)
chat_site = wrap2.host
room_id = message_parts[1]
if not room_id.isdigit():
return False, "Room ID is invalid."
sites = get_all_notification_sites(user_id, chat_site, room_id)
if len(sites) == 0:
return "You won't get notified for any sites in that room."
return "You will get notified for these sites:\r\n" + ", ".join(sites)
return False, None # Unrecognized command, can be edited later.
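# Return-value contract for handle_commands(), as consumed by watcher() above:
#   - a string              -> posted to the room as a reply to the triggering message
#   - None                  -> the command was handled, nothing is posted
#   - False or (False, msg) -> the command was not recognized; the message is added
#                              to the listen-if-edited list so an edit can retry it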
|
application.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import subprocess
import tempfile
import textwrap
import threading
from pathlib import Path
from typing import IO, List
from flask import Flask, request, jsonify
from flask_cors import CORS
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG,
)
LOG: logging.Logger = logging.getLogger(__name__)
def _consume(stream: IO[str]) -> str:
buffer: List[str] = []
def _consume() -> None:
while True:
line = stream.readline()
if line:
decoded = line.strip()
LOG.debug(decoded)
buffer.append(decoded)
else:
break
thread = threading.Thread(target=_consume)
thread.start()
thread.join()
return "\n".join(buffer)
class Pyre:
def __init__(self) -> None:
self._directory: Path = Path(tempfile.mkdtemp())
LOG.debug(f"Starting server in `{self._directory}`...")
pyre_configuration = textwrap.dedent(
f"""
{{
"source_directories": ["."]
}}
"""
)
LOG.debug(f"Writing configuration:\n{pyre_configuration}")
pyre_configuration_path = self._directory / ".pyre_configuration"
pyre_configuration_path.write_text(pyre_configuration)
LOG.debug(f"Writing watchman configuration")
watchman_configuration_path = self._directory / ".watchmanconfig"
watchman_configuration_path.write_text("{}\n")
LOG.debug(f"Starting watchman")
subprocess.check_call(["watchman", "watch", str(self._directory)])
LOG.debug(f"Priming the server")
# TODO(T82114844): incremental is borked on Ubuntu 20.04.
subprocess.check_call(
["pyre", "--noninteractive", "check"], cwd=self._directory
)
def check(self, input: str) -> str:
LOG.debug(f"Writing code:\n{input}")
code_path = self._directory / "input.py"
code_path.write_text(input)
# TODO(T82114844): incremental is borked on Ubuntu 20.04.
with subprocess.Popen(
["pyre", "--output=json", "--noninteractive", "check"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self._directory,
text=True,
) as process:
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stderr = _consume(process.stderr)
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stdout = _consume(process.stdout)
return_code = process.wait()
if return_code > 1:
LOG.error(f"Returning error: {stderr}")
result = jsonify(errors=[stderr])
else:
errors = json.loads(stdout)
result = jsonify(data={"errors": errors, "stderr": stderr})
return result
pyre = Pyre()
application = Flask(__name__)
CORS(application)
@application.route("/check", methods=["GET", "POST"])
def check() -> str:
input = (
request.args.get("input")
or request.form.get("input")
or request.json.get("input")
)
if input is None:
return jsonify(errors=["Input not provided"])
LOG.info(f"Checking `{input}`...")
return pyre.check(input)
@application.route("/")
def index() -> str:
return "index"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
arguments: argparse.Namespace = parser.parse_args()
application.debug = arguments.debug
application.run()
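# A minimal client sketch for the /check endpoint above (assumes Flask's default
# bind address of 127.0.0.1:5000, since application.run() is called without
# explicit host/port arguments):
#
#   import requests
#   response = requests.post(
#       "http://127.0.0.1:5000/check",
#       json={"input": "def f(x: int) -> str:\n    return x\n"},
#   )
#   print(response.json())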
|
event.py
|
import threading
import time
def task(event, sec):
print("Started thread but waiting for event...")
# make the thread wait for event with timeout set
internal_set = event.wait(sec)
print("waiting")
if internal_set:
print("Event set")
else:
print("Time out,event not set")
# initializing the event object
e = threading.Event()
print("Event is set.")
# starting the thread
t1 = threading.Thread(name='Event-Blocking-Thread', target=task, args=(e,10))
t1.start()
t1.join()
e.set()
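# For comparison, a sketch of the non-timeout path (a hypothetical variant, not
# part of the original script): a Timer sets the event after ~1 second, so
# event.wait() returns True well before the 10-second timeout.
#
#   e2 = threading.Event()
#   t2 = threading.Thread(target=task, args=(e2, 10))
#   threading.Timer(1, e2.set).start()
#   t2.start()
#   t2.join()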
|
kvstore_test.py
|
from multiprocessing import Process
from .kvstore import KVStore
from .kvstore_server import KVStoreServer
def test_case1():
def server(port):
kvstore_server = KVStoreServer(port=port)
kvstore_server.request_handle()
def worker(ports):
kvstore_worker = KVStore(ports=ports)
kvstore_worker.push()
server_ports = range(5550, 5558, 2)
for server_port in server_ports:
Process(target=server, args=(server_port,)).start()
# Now we can connect a client to all these servers
Process(target=worker, args=(server_ports,)).start()
if __name__ == '__main__':
test_case1()
|
rbssh.py
|
#!/usr/bin/env python
#
# rbssh.py -- A custom SSH client for use in Review Board.
#
# This is used as an ssh replacement that can be used across platforms with
# a custom .ssh directory. OpenSSH doesn't respect $HOME, instead reading
# /etc/passwd directly, which causes problems for us. Using rbssh, we can
# work around this.
#
#
# Copyright (c) 2010-2011 Beanbag, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import getpass
import logging
import os
import select
import sys
from optparse import OptionParser
import paramiko
from reviewboard import get_version_string
from reviewboard.scmtools import sshutils
from reviewboard.scmtools.core import SCMTool
DEBUG = os.getenv('DEBUG_RBSSH')
DEBUG_LOGDIR = os.getenv('RBSSH_LOG_DIR')
options = None
class PlatformHandler(object):
def __init__(self, channel):
self.channel = channel
def shell(self):
raise NotImplementedError
def transfer(self):
raise NotImplementedError
def process_channel(self, channel):
if channel.closed:
return False
logging.debug('!! process_channel\n')
if channel.recv_ready():
data = channel.recv(4096)
if not data:
logging.debug('!! stdout empty\n')
return False
sys.stdout.write(data)
sys.stdout.flush()
if channel.recv_stderr_ready():
data = channel.recv_stderr(4096)
if not data:
logging.debug('!! stderr empty\n')
return False
sys.stderr.write(data)
sys.stderr.flush()
if channel.exit_status_ready():
logging.debug('!!! exit_status_ready\n')
return False
return True
def process_stdin(self, channel):
logging.debug('!! process_stdin\n')
try:
buf = os.read(sys.stdin.fileno(), 1)
except OSError:
buf = None
if not buf:
logging.debug('!! stdin empty\n')
return False
channel.send(buf)
return True
class PosixHandler(PlatformHandler):
def shell(self):
import termios
import tty
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
self.handle_communications()
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
def transfer(self):
import fcntl
fd = sys.stdin.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self.handle_communications()
def handle_communications(self):
while True:
rl, wl, el = select.select([self.channel, sys.stdin], [], [])
if self.channel in rl:
if not self.process_channel(self.channel):
break
if sys.stdin in rl:
if not self.process_stdin(self.channel):
self.channel.shutdown_write()
break
class WindowsHandler(PlatformHandler):
def shell(self):
self.handle_communications()
def transfer(self):
self.handle_communications()
def handle_communications(self):
import threading
logging.debug('!! begin_windows_transfer\n')
self.channel.setblocking(0)
def writeall(channel):
while self.process_channel(channel):
pass
logging.debug('!! Shutting down reading\n')
channel.shutdown_read()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while self.process_stdin(self.channel):
pass
except EOFError:
pass
logging.debug('!! Shutting down writing\n')
self.channel.shutdown_write()
def print_version(option, opt, value, parser):
parser.print_version()
sys.exit(0)
def parse_options(args):
global options
hostname = None
parser = OptionParser(usage='%prog [options] [user@]hostname [command]',
version='%prog ' + get_version_string())
parser.disable_interspersed_args()
parser.add_option('-l',
dest='username', metavar='USERNAME', default=None,
help='the user to log in as on the remote machine')
parser.add_option('-p', '--port',
type='int', dest='port', metavar='PORT', default=None,
help='the port to connect to')
parser.add_option('-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='suppress any unnecessary output')
parser.add_option('-s',
dest='subsystem', metavar='SUBSYSTEM', default=None,
nargs=2,
help='the subsystem to use (ssh or sftp)')
parser.add_option('-V',
action='callback', callback=print_version,
help='display the version information and exit')
parser.add_option('--rb-disallow-agent',
action='store_false', dest='allow_agent',
default=os.getenv('RBSSH_ALLOW_AGENT') != '0',
help='disable using the SSH agent for authentication')
parser.add_option('--rb-local-site',
dest='local_site_name', metavar='NAME',
default=os.getenv('RB_LOCAL_SITE'),
help='the local site name containing the SSH keys to use')
(options, args) = parser.parse_args(args)
if options.subsystem:
if len(options.subsystem) != 2:
parser.error('-s requires a hostname and a valid subsystem')
elif options.subsystem[1] not in ('sftp', 'ssh'):
parser.error('Invalid subsystem %s' % options.subsystem[1])
hostname, options.subsystem = options.subsystem
if len(args) == 0 and not hostname:
parser.print_help()
sys.exit(1)
if not hostname:
hostname = args[0]
args = args[1:]
return hostname, args
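# Example invocations (illustrative only; options and positional arguments as
# handled by parse_options above):
#
#   rbssh.py -l alice -p 2222 example.com ls -l    # run a remote command
#   rbssh.py -s example.com sftp                   # use the sftp subsystem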
def main():
if DEBUG:
pid = os.getpid()
log_filename = 'rbssh-%s.log' % pid
if DEBUG_LOGDIR:
log_path = os.path.join(DEBUG_LOGDIR, log_filename)
else:
log_path = log_filename
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-18s %(levelname)-8s '
'%(message)s',
datefmt='%m-%d %H:%M',
filename=log_path,
filemode='w')
logging.debug('%s' % sys.argv)
logging.debug('PID %s' % pid)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(message)s'))
ch.addFilter(logging.Filter('root'))
logging.getLogger('').addHandler(ch)
path, command = parse_options(sys.argv[1:])
if '://' not in path:
path = 'ssh://' + path
username, hostname = SCMTool.get_auth_from_uri(path, options.username)
if username is None:
username = getpass.getuser()
logging.debug('!!! %s, %s, %s' % (hostname, username, command))
client = sshutils.get_ssh_client(options.local_site_name)
client.set_missing_host_key_policy(paramiko.WarningPolicy())
attempts = 0
password = None
key = sshutils.get_user_key(options.local_site_name)
while True:
try:
client.connect(hostname, username=username, password=password,
pkey=key, allow_agent=options.allow_agent)
break
except paramiko.AuthenticationException, e:
if attempts == 3 or not sys.stdin.isatty():
logging.error('Too many authentication failures for %s' %
username)
sys.exit(1)
attempts += 1
password = getpass.getpass("%s@%s's password: " %
(username, hostname))
except paramiko.SSHException, e:
logging.error('Error connecting to server: %s' % e)
sys.exit(1)
except Exception, e:
logging.error('Unknown exception during connect: %s (%s)' %
(e, type(e)))
sys.exit(1)
transport = client.get_transport()
channel = transport.open_session()
if sys.platform in ('cygwin', 'win32'):
logging.debug('!!! Using WindowsHandler')
handler = WindowsHandler(channel)
else:
logging.debug('!!! Using PosixHandler')
handler = PosixHandler(channel)
if options.subsystem == 'sftp':
logging.debug('!!! Invoking sftp subsystem')
channel.invoke_subsystem('sftp')
handler.transfer()
elif command:
logging.debug('!!! Sending command %s' % command)
channel.exec_command(' '.join(command))
handler.transfer()
else:
logging.debug('!!! Opening shell')
channel.get_pty()
channel.invoke_shell()
handler.shell()
logging.debug('!!! Done')
status = channel.recv_exit_status()
client.close()
return status
if __name__ == '__main__':
main()
# ... with blackjack, and hookers.
|
test_utils_test.py
|
import asyncio
import pathlib
import socket
import threading
from contextlib import contextmanager
from time import sleep
import pytest
from tornado import gen
from distributed import Client, Nanny, Scheduler, Worker, config, default_client
from distributed.core import rpc
from distributed.metrics import time
from distributed.utils import get_ip
from distributed.utils_test import (
_UnhashableCallable,
cluster,
gen_cluster,
gen_test,
inc,
new_config,
tls_only_security,
wait_for_port,
)
def test_bare_cluster(loop):
with cluster(nworkers=10) as (s, _):
pass
def test_cluster(loop):
with cluster() as (s, [a, b]):
with rpc(s["address"]) as s:
ident = loop.run_sync(s.identity)
assert ident["type"] == "Scheduler"
assert len(ident["workers"]) == 2
@gen_cluster(client=True)
async def test_gen_cluster(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert await c.submit(lambda: 123) == 123
@gen_cluster(client=True)
async def test_gen_cluster_pytest_fixture(c, s, a, b, tmp_path):
assert isinstance(tmp_path, pathlib.Path)
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized(c, s, a, b, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", ["a", "b"])
@gen_cluster(client=True)
async def test_gen_cluster_multi_parametrized(c, s, a, b, foo, bar):
assert foo is True
assert bar in ("a", "b")
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized_variadic_workers(c, s, *workers, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in workers:
assert isinstance(w, Worker)
@gen_cluster(
client=True,
Worker=Nanny,
config={"distributed.comm.timeouts.connect": "1s", "new.config.value": "foo"},
)
async def test_gen_cluster_set_config_nanny(c, s, a, b):
def assert_config():
import dask
assert dask.config.get("distributed.comm.timeouts.connect") == "1s"
assert dask.config.get("new.config.value") == "foo"
return dask.config
await c.run(assert_config)
await c.run_on_scheduler(assert_config)
@gen_cluster(client=True)
def test_gen_cluster_legacy_implicit(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert (yield c.submit(lambda: 123)) == 123
@gen_cluster(client=True)
@gen.coroutine
def test_gen_cluster_legacy_explicit(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert (yield c.submit(lambda: 123)) == 123
@pytest.mark.skip(reason="This hangs on travis")
def test_gen_cluster_cleans_up_client(loop):
import dask.context
assert not dask.config.get("get", None)
@gen_cluster(client=True)
async def f(c, s, a, b):
assert dask.config.get("get", None)
await c.submit(inc, 1)
f()
assert not dask.config.get("get", None)
@gen_cluster()
async def test_gen_cluster_without_client(s, a, b):
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
async with Client(s.address, asynchronous=True) as c:
future = c.submit(lambda x: x + 1, 1)
result = await future
assert result == 2
@gen_cluster(
client=True,
scheduler="tls://127.0.0.1",
nthreads=[("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)],
security=tls_only_security(),
)
async def test_gen_cluster_tls(e, s, a, b):
assert isinstance(e, Client)
assert isinstance(s, Scheduler)
assert s.address.startswith("tls://")
for w in [a, b]:
assert isinstance(w, Worker)
assert w.address.startswith("tls://")
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
@gen_test()
async def test_gen_test():
await asyncio.sleep(0.01)
@gen_test()
def test_gen_test_legacy_implicit():
yield asyncio.sleep(0.01)
@gen_test()
@gen.coroutine
def test_gen_test_legacy_explicit():
yield asyncio.sleep(0.01)
@contextmanager
def _listen(delay=0):
serv = socket.socket()
serv.bind(("127.0.0.1", 0))
e = threading.Event()
def do_listen():
e.set()
sleep(delay)
serv.listen(5)
ret = serv.accept()
if ret is not None:
cli, _ = ret
cli.close()
serv.close()
t = threading.Thread(target=do_listen)
t.daemon = True
t.start()
try:
e.wait()
sleep(0.01)
yield serv
finally:
t.join(5.0)
def test_wait_for_port():
t1 = time()
with pytest.raises(RuntimeError):
wait_for_port((get_ip(), 9999), 0.5)
t2 = time()
assert t2 - t1 >= 0.5
with _listen(0) as s1:
t1 = time()
wait_for_port(s1.getsockname())
t2 = time()
assert t2 - t1 <= 1.0
with _listen(1) as s1:
t1 = time()
wait_for_port(s1.getsockname())
t2 = time()
assert t2 - t1 <= 2.0
def test_new_config():
c = config.copy()
with new_config({"xyzzy": 5}):
config["xyzzy"] == 5
assert config == c
assert "xyzzy" not in config
def test_lingering_client():
@gen_cluster()
async def f(s, a, b):
await Client(s.address, asynchronous=True)
f()
with pytest.raises(ValueError):
default_client()
def test_lingering_client_2(loop):
with cluster() as (s, [a, b]):
client = Client(s["address"], loop=loop)
def test_tls_cluster(tls_client):
    assert tls_client.submit(lambda x: x + 1, 10).result() == 11
assert tls_client.security
@pytest.mark.asyncio
async def test_tls_scheduler(security, cleanup):
async with Scheduler(
security=security, host="localhost", dashboard_address=":0"
) as s:
assert s.address.startswith("tls")
def test__UnhashableCallable():
func = _UnhashableCallable()
assert func(1) == 2
with pytest.raises(TypeError, match="unhashable"):
hash(func)
|
strategy_engine.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from queue import Queue
from threading import Thread
from nanomsg import Socket, SUB, PUSH, SUB_SUBSCRIBE, SOL_SOCKET, RCVTIMEO
from datetime import datetime, timedelta, time
import os
import yaml
import json
from collections import defaultdict
from copy import copy
import traceback
import importlib
from typing import Any, Callable
from pathlib import Path
from ..api.ctp_constant import THOST_FTDC_PT_Net
from ..common.constant import (
EngineType, Exchange, Interval, Product, PRODUCT_CTP2VT, OPTIONTYPE_CTP2VT,
EventType, MSG_TYPE, SYMBOL_TYPE
)
from ..common.datastruct import (
Event, SubscribeRequest, OrderData, TradeData, TickData, BarData, PositionData,
CtpOrderField, ContractData, AccountData, OrderRequest
)
from ..common.utility import extract_full_symbol, load_json, save_json
from ..strategy.strategy_base import StrategyBase
from ..data.rqdata import rqdata_client
from ..data import database_manager
from ..trade.portfolio_manager import OffsetConverter
from ..engine.iengine import BaseEngine, EventEngine
CtaTemplate = StrategyBase
class StrategyEngine(BaseEngine):
"""
Send to and receive from msg server ,used for strategy
"""
config_filename = "config_server.yaml"
setting_filename = "cta_strategy_setting.json"
data_filename = "cta_strategy_data.json"
# init
def __init__(self, configfile: str = '', id: int = 1):
        """
        Uses two sockets to send messages to and receive messages from the msg server.
        """
        super().__init__(event_engine=EventEngine(10))
self.__active = False
self.id = os.getpid()
self.engine_type = EngineType.LIVE
self._recv_sock = Socket(SUB)
self._send_sock = Socket(PUSH)
self._handlers = defaultdict(list)
if configfile:
self.config_filename = configfile
filepath = Path.cwd().joinpath("etc/" + self.config_filename)
with open(filepath, encoding='utf8') as fd:
self._config = yaml.load(fd)
self.ordercount = 0
# stragegy manage
self.strategy_setting = {} # strategy_name: dict
self.strategy_data = {} # strategy_name: dict
self.classes = {} # class_name: stategy_class
self.strategies = {} # strategy_name: strategy
# self.classes_id = {} # class_id : strategy
# self.strategies_id = {} # strategy_ID: strategy
self.symbol_strategy_map = defaultdict(
list) # full_symbol: strategy list
self.orderid_strategy_map = {} # vt_orderid: strategy
self.strategy_orderid_map = defaultdict(
set) # strategy_name: client_order_id list
self.stop_order_count = 0 # for generating stop_orderid
self.stop_orders = {} # stop_orderid: stop_order
self.init_thread = None
self.init_queue = Queue()
# order,tick,position ,etc manage
self.ticks = {}
self.orders = {} # clientorder id list
self.trades = {}
self.positions = {}
self.accounts = {}
self.contracts = {}
self.active_orders = {} # SQ id list
self.rq_client = None
self.rq_symbols = set()
self.offset_converter = OffsetConverter(self)
self.autoinited = False
self.autostarted = False
self.dayswitched = False
self.init_engine()
# init functions
def init_engine(self):
self.init_nng()
self.init_rqdata()
self.load_contract()
self.load_strategy_class()
self.load_strategy_setting()
self.load_strategy_data()
self.register_event()
def init_nng(self):
self._recv_sock.set_string_option(
SUB, SUB_SUBSCRIBE, '') # receive msg start with all
self._recv_sock.set_int_option(SOL_SOCKET, RCVTIMEO, 100)
self._recv_sock.connect(self._config['serverpub_url'])
self._send_sock.connect(self._config['serverpull_url'])
def init_rqdata(self):
result = rqdata_client.init()
if result:
self.write_log("RQData数据接口初始化成功")
def load_contract(self):
contractfile = Path.cwd().joinpath("etc/ctpcontract.yaml")
with open(contractfile, encoding='utf8') as fc:
contracts = yaml.load(fc)
print('loading contracts, total number:', len(contracts))
for sym, data in contracts.items():
contract = ContractData(
symbol=data["symbol"],
exchange=Exchange(data["exchange"]),
name=data["name"],
product=PRODUCT_CTP2VT[str(data["product"])],
size=data["size"],
pricetick=data["pricetick"],
net_position=True if str(
data["positiontype"]) == THOST_FTDC_PT_Net else False,
long_margin_ratio=data["long_margin_ratio"],
short_margin_ratio=data["short_margin_ratio"],
full_symbol=data["full_symbol"]
)
# For option only
if contract.product == Product.OPTION:
                contract.option_underlying = data["option_underlying"]
                contract.option_type = OPTIONTYPE_CTP2VT.get(str(data["option_type"]), None)
                contract.option_strike = data["option_strike"]
                contract.option_expiry = datetime.strptime(str(data["option_expiry"]), "%Y%m%d")
self.contracts[contract.full_symbol] = contract
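    # The etc/ctpcontract.yaml file read above maps each symbol to a dict whose
    # keys match those accessed in load_contract(). A sketch of one entry
    # (values are illustrative placeholders, not real contract data):
    #
    #   rb2405:
    #     symbol: rb2405
    #     exchange: SHFE
    #     name: ...
    #     product: ...
    #     size: 10
    #     pricetick: 1.0
    #     positiontype: ...
    #     long_margin_ratio: 0.1
    #     short_margin_ratio: 0.1
    #     full_symbol: ...
    #     # option contracts additionally carry option_underlying, option_type,
    #     # option_strike and option_expiry (YYYYMMDD)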
def register_event(self):
""""""
self.event_engine.register(EventType.TICK, self.process_tick_event)
self.event_engine.register(
EventType.ORDERSTATUS, self.process_orderstatus_event)
self.event_engine.register(EventType.FILL, self.process_trade_event)
self.event_engine.register(
EventType.POSITION, self.process_position_event)
self.event_engine.register(
EventType.ACCOUNT, self.process_account_event)
self.event_engine.register(
EventType.CONTRACT, self.process_contract_event)
self.event_engine.register(
EventType.STRATEGY_CONTROL, self.process_strategycontrol_event)
self.event_engine.register(
EventType.HEADER, self.process_general_event)
self.event_engine.register(EventType.TIMER, self.process_timer_event)
# event handler
def process_timer_event(self, event):
        # auto init strategies at about 8:55/20:55 and auto start them at 8:57/20:57
nowtime = datetime.now().time()
dayinitflag = (nowtime > time(hour=8, minute=55)) and (
nowtime < time(hour=8, minute=56))
daystartflag = (nowtime > time(hour=8, minute=57)) and (
nowtime < time(hour=8, minute=58))
nightinitflag = (nowtime > time(hour=20, minute=55)) and (
nowtime < time(hour=20, minute=56))
nightstartflag = (nowtime > time(hour=20, minute=57)) and (
nowtime < time(hour=20, minute=58))
if (dayinitflag or nightinitflag) and (not self.autoinited):
for name, strategy in self.strategies.items():
if strategy.autostart:
self.init_strategy(name)
self.dayswitched = False
self.autoinited = True
if (daystartflag or nightstartflag) and (not self.autostarted):
for name, strategy in self.strategies.items():
if strategy.autostart:
self.start_strategy(name)
self.autostarted = True
self.dayswitched = False
# auto stop strategy at 16:00 and 3:00
if (nowtime > time(hour=16, minute=0)) and (nowtime < time(hour=16, minute=1)) and (not self.dayswitched):
for name, strategy in self.strategies.items():
if strategy.autostart:
self.reset_strategy(name)
self.dayswitched = True
self.autostarted = False
self.autoinited = False
if (nowtime > time(hour=3, minute=0)) and (nowtime < time(hour=3, minute=1)) and (not self.dayswitched):
for name, strategy in self.strategies.items():
if strategy.autostart:
self.reset_strategy(name)
self.dayswitched = True
self.autostarted = False
self.autoinited = False
def process_general_event(self, event):
for name, strategy in self.strategies.items():
self.call_strategy_func(strategy, strategy.on_headermsg, event)
pass
def process_tick_event(self, event: Event):
""""""
tick = event.data
strategies = self.symbol_strategy_map[tick.full_symbol]
if not strategies:
return
# self.check_stop_order(tick)
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_tick, tick)
self.ticks[tick.full_symbol] = tick
def process_orderstatus_event(self, event: Event):
""""""
order = event.data
        self.offset_converter.update_order(order)  # recalculate frozen volume
if order.clientID != self.id:
return
self.orders[order.client_order_id] = order
# If order is active, then update data in dict.
if order.is_active():
print('order is active')
self.active_orders[order.client_order_id] = order
# Otherwise, pop inactive order from in dict
elif order.client_order_id in self.active_orders:
self.active_orders.pop(order.client_order_id)
strategy = self.orderid_strategy_map.get(order.client_order_id, None)
if not strategy:
print(order.client_order_id, 'dont find strategy')
return
# Remove client_order_id if order is no longer active.
client_order_ids = self.strategy_orderid_map[strategy.strategy_name]
if (order.client_order_id in client_order_ids) and (not order.is_active()):
print('rm inactive order in strategy order map')
client_order_ids.remove(order.client_order_id)
# For server stop order, call strategy on_stop_order function
# if order.type == OrderType.STOP:
# so = StopOrder(
# full_symbol=order.full_symbol,
# direction=order.direction,
# offset=order.offset,
# price=order.price,
# volume=order.volume,
# stop_orderid=order.vt_orderid,
# strategy_name=strategy.strategy_name,
# status=STOP_STATUS_MAP[order.status],
# vt_orderid=order.vt_orderid,
# )
# self.call_strategy_func(strategy, strategy.on_stop_order, so)
# Call strategy on_order function
self.call_strategy_func(strategy, strategy.on_order, order)
def process_trade_event(self, event: Event):
""""""
trade = event.data
self.offset_converter.update_trade(trade)
if trade.clientID != self.id:
return
strategy = self.orderid_strategy_map.get(trade.client_order_id, None)
if not strategy:
return
# if trade.direction == Direction.LONG:
# strategy.pos += trade.volume
# else:
# strategy.pos -= trade.volume
self.call_strategy_func(strategy, strategy.on_trade, trade)
self.put_strategy_event(strategy)
self.trades[trade.vt_tradeid] = trade
# send qry pos to update position
m = Event(type=EventType.QRY,
des=event.source,
src=str(self.id),
msgtype=MSG_TYPE.MSG_TYPE_QRY_POS)
self._send_sock.send(m.serialize())
# self.put(m)
def process_position_event(self, event: Event):
""""""
position = event.data
self.offset_converter.update_position(position)
self.positions[position.key] = position
def process_account_event(self, event: Event):
""""""
account = event.data
self.accounts[account.accountid] = account
def process_contract_event(self, event: Event):
""""""
contract = event.data
self.contracts[contract.full_symbol] = contract
def process_strategycontrol_event(self, event: Event):
msgtype = event.msg_type
deslist = ['@*', str(self.id), '@' + str(self.id)]
if (event.destination not in deslist):
return
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_STATUS):
m = Event(type=EventType.STRATEGY_CONTROL,
des='@0',
src=str(self.id),
msgtype=MSG_TYPE.MSG_TYPE_STRATEGY_STATUS
)
self._send_sock.send(m.serialize())
# elif (event.destination not in deslist ) :
# return
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_ADD):
v = event.data.split('|')
classname = v[0]
strname = v[1]
fulsym = v[2]
setting = json.loads(v[3])
self.add_strategy(classname, strname, fulsym, setting)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_INIT):
self.init_strategy(event.data)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_INIT_ALL):
self.init_all_strategies()
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_START):
self.start_strategy(event.data)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_START_ALL):
self.start_all_strategies()
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_STOP):
self.stop_strategy(event.data)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_STOP_ALL):
self.stop_all_strategies()
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_RELOAD):
self.classes.clear()
self.load_strategy_class(True)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_RESET):
self.reset_strategy(event.data)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_RESET_ALL):
self.reset_all_strategies()
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_EDIT):
v = event.data.split('|')
setting = json.loads(v[1])
self.edit_strategy(v[0], setting)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_REMOVE):
if self.remove_strategy(event.data):
m = Event(type=EventType.STRATEGY_CONTROL,
data=event.data,
des='@0',
src=str(self.id),
msgtype=MSG_TYPE.MSG_TYPE_STRATEGY_RTN_REMOVE
)
self._send_sock.send(m.serialize())
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_REMOVE_DUPLICATE):
self.remove_strategy(event.data, True)
elif (msgtype == MSG_TYPE.MSG_TYPE_STRATEGY_GET_DATA):
# print('begin get data')
if event.data:
strategy = self.strategies.get(event.data, None)
if strategy:
self.put_strategy_event(strategy)
else: # get all strategy data
for strategy in self.strategies.values():
self.put_strategy_event(strategy)
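# A sketch of the control-message payloads handled above (names and values are
# hypothetical; only the '|'-separated layout and the JSON settings part are
# implied by the split('|') / json.loads calls in this method):
#   MSG_TYPE_STRATEGY_ADD  -> event.data = 'DemoStrategy|demo01|SHFE F RB 1910|{"fast_window": 5}'
#   MSG_TYPE_STRATEGY_EDIT -> event.data = 'demo01|{"fast_window": 10}'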
def call_strategy_func(
self, strategy: StrategyBase, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
# strategy manage
def add_strategy(
self, class_name: str, strategy_name: str, full_symbol: str, setting: dict
):
"""
Add a new strategy.
"""
print("begin add strategy")
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
if class_name not in self.classes:
self.write_log(
f'strategy class [{class_name}] does not exist, please check')
return
strategy_class = self.classes[class_name]
strategy = strategy_class(self, strategy_name, full_symbol, setting)
self.strategies[strategy_name] = strategy
# Add full_symbol to strategy map.
strategies = self.symbol_strategy_map[full_symbol]
strategies.append(strategy)
# print("335 add strategy")
# Update to setting file.
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
print("end add strategy")
def init_strategy(self, strategy_name: str):
"""
Init a strategy.
"""
self.init_queue.put(strategy_name)
if not self.init_thread:
self.init_thread = Thread(target=self._init_strategy)
self.init_thread.start()
def _init_strategy(self):
"""
Init strategies in queue.
"""
while not self.init_queue.empty():
strategy_name = self.init_queue.get()
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
continue
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
self.call_strategy_func(strategy, strategy.on_init)
# Restore strategy data(variables)
data = self.strategy_data.get(strategy_name, None)
if data:
for name in strategy.variables:
value = data.get(name, None)
if value:
setattr(strategy, name, value)
# Subscribe market data
contract = self.get_contract(strategy.full_symbol)
if contract:
m = Event(type=EventType.SUBSCRIBE,
msgtype=MSG_TYPE.MSG_TYPE_SUBSCRIBE_MARKET_DATA)
m.destination = "CTP.MD"
m.source = str(self.id)
req = SubscribeRequest()
req.sym_type = SYMBOL_TYPE.CTP
req.content = contract.symbol
m.data = req
self._send_sock.send(m.serialize())
else:
self.write_log(f"行情订阅失败,找不到合约{strategy.full_symbol}", strategy)
# qry pos and acc
m = Event(type=EventType.QRY, msgtype=MSG_TYPE.MSG_TYPE_QRY_POS)
m.destination = strategy.api + '.' + strategy.account
m.source = str(self.id)
self._send_sock.send(m.serialize())
m = Event(type=EventType.QRY,
msgtype=MSG_TYPE.MSG_TYPE_QRY_ACCOUNT)
m.destination = strategy.api + '.' + strategy.account
m.source = str(self.id)
self._send_sock.send(m.serialize())
# Put event to update init completed status.
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
self.init_thread = None
def start_strategy(self, strategy_name: str):
"""
Start a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
# qry pos and acc
m = Event(type=EventType.QRY, msgtype=MSG_TYPE.MSG_TYPE_QRY_POS)
m.destination = strategy.api + '.' + strategy.account
m.source = str(self.id)
self._send_sock.send(m.serialize())
m = Event(type=EventType.QRY,
msgtype=MSG_TYPE.MSG_TYPE_QRY_ACCOUNT)
m.destination = strategy.api + '.' + strategy.account
m.source = str(self.id)
self._send_sock.send(m.serialize())
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
"""
Stop a strategy.
"""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
# Call on_stop function of the strategy
self.call_strategy_func(strategy, strategy.on_stop)
# Change trading status of strategy to False
strategy.trading = False
# Cancel all orders of the strategy
self.cancel_all(strategy)
# Update GUI
self.put_strategy_event(strategy)
def reset_strategy(self, strategy_name: str):
"Reset a strategy"
strategy = self.strategies[strategy_name]
if not strategy.inited:
return
# stop first
self.call_strategy_func(strategy, strategy.on_stop)
strategy.trading = False
self.cancel_all(strategy)
# reset
self.call_strategy_func(strategy, strategy.on_reset)
strategy.inited = False
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str, duplicate: bool = False):
"""
Remove a strategy.
"""
print("begin remove")
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
if not duplicate:
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.symbol_strategy_map[strategy.full_symbol]
strategies.remove(strategy)
# Remove from active orderid map
if strategy_name in self.strategy_orderid_map:
orderids = self.strategy_orderid_map.pop(strategy_name)
# Remove vt_orderid strategy map
for _orderid in orderids:
if _orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(_orderid)
# Remove from strategies
self.strategies.pop(strategy_name)
print("end remove")
return True
def load_strategy_class(self, reload: bool = False):
"""
Load strategy class from source code.
"""
# app_path = Path(__file__).parent.parent
# path1 = app_path.joinpath("cta_strategy", "strategies")
# self.load_strategy_class_from_folder(
# path1, "vnpy.app.cta_strategy.strategies")
path2 = Path.cwd().joinpath("mystrategy")
self.load_strategy_class_from_folder(path2, "", reload)
def load_strategy_class_from_folder(self, path: Path, module_name: str = "", reload: bool = False):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = "mystrategy.".join(
[module_name, filename.replace(".py", "")])
self.load_strategy_class_from_module(
strategy_module_name, reload)
def load_strategy_class_from_module(self, module_name: str, reload: bool = False):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
# if reload delete old attribute
if reload:
for attr in dir(module):
if attr not in ('__name__', '__file__'):
delattr(module, attr)
importlib.reload(module)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def load_strategy_data(self):
"""
Load strategy data from json file.
"""
self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: StrategyBase):
"""
Sync strategy data into json file.
"""
data = strategy.get_variables()
# Strategy status (inited, trading) should not be synced.
data.pop("inited")
data.pop("trading")
self.strategy_data = load_json(self.data_filename)
self.strategy_data[strategy.strategy_name] = data
save_json(self.data_filename, self.strategy_data)
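# For illustration only: the strategy data file written above holds one entry
# per strategy name (strategy and variable names below are hypothetical, and
# 'inited'/'trading' are popped before saving, as done above):
#   {"demo01": {"pos": 2, "last_entry_price": 3521.0}}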
def get_all_strategy_class_names(self):
"""
Return names of strategy classes loaded.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
def reset_all_strategies(self):
for strategy_name in self.strategies.keys():
self.reset_strategy(strategy_name)
def load_strategy_setting(self):
"""
Load setting file.
"""
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["full_symbol"],
strategy_config["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
# reload first so that settings saved by other engines are preserved
self.strategy_setting = load_json(self.setting_filename)
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"full_symbol": strategy.full_symbol,
"setting": setting,
}
save_json(self.setting_filename, self.strategy_setting)
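# For illustration, the setting file maintained here contains entries of the
# form below (names and values are hypothetical; the keys mirror the dict
# written in update_strategy_setting):
#   {"demo01": {"class_name": "DemoStrategy",
#               "full_symbol": "SHFE F RB 1910",
#               "setting": {"fast_window": 5}}}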
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
# reload first so that settings saved by other engines are preserved
self.strategy_setting = load_json(self.setting_filename)
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
# def put_stop_order_event(self, stop_order: StopOrder):
# """
# Put an event to update stop order status.
# """
# event = Event(EVENT_CTA_STOPORDER, stop_order)
# self.event_engine.put(event)
def put_strategy_event(self, strategy: StrategyBase):
"""
Put an event to update strategy status.
"""
data = strategy.get_data()
sdata = {}
sdata[strategy.strategy_name] = data
# event = Event(EVENT_CTA_STRATEGY, data)
# self.event_engine.put(event)
msg = json.dumps(sdata)
m = Event(type=EventType.STRATEGY_CONTROL, data=msg, des='@0', src=str(
self.id), msgtype=MSG_TYPE.MSG_TYPE_STRATEGY_RTN_DATA)
self._send_sock.send(m.serialize())
#save_json(self.data_filename, sdata)
# strategy functions
#get ,qry
def query_bar_from_rq(
self, symbol: str, exchange: Exchange, interval: Interval, start: datetime, end: datetime
):
"""
Query bar data from RQData.
"""
data = rqdata_client.query_bar(
symbol, exchange, interval, start, end
)
return data
def load_bar(
self,
full_symbol: str,
days: int,
interval: Interval,
callback: Callable[[BarData], None],
datasource: str = 'DataBase'
):
""""""
# Pad the calendar lookback so weekends are covered: if the requested number
# of days reaches back before Monday of the current week, add two calendar
# days for the weekend (one extra day when today is Sunday).
tradedays = abs(days)
weekday = datetime.now().weekday()
adddays = 2 if (days - weekday > 0) else 0
if weekday == 6:
tradedays = days + 1
else:
tradedays = days + adddays
symbol, exchange = extract_full_symbol(full_symbol)
end = datetime.now()
start = end - timedelta(days=tradedays)
# Query bars from RQData by default, if not found, load from database.
bars = self.query_bar_from_rq(symbol, exchange, interval, start, end)
if not bars:
bars = database_manager.load_bar_data(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end,
)
for bar in bars:
callback(bar)
def load_tick(self, full_symbol: str, days: int, callback: Callable, datasource: str = 'DataBase'):
tradedays = abs(days)
weekday = datetime.now().weekday()
adddays = 2 if (days - weekday > 0) else 0
if weekday == 6:
tradedays = days + 1
else:
tradedays = days + adddays
symbol, exchange = extract_full_symbol(full_symbol)
end = datetime.now()
start = end - timedelta(tradedays)
ticks = database_manager.load_tick_data(symbol, exchange, start, end)
for tick in ticks:
callback(tick)
def get_tick(self, full_symbol):
"""
Get latest market tick data by full_symbol.
"""
return self.ticks.get(full_symbol, None)
def get_order(self, orderid: int):
"""
Get latest order data by orderid.
"""
return self.orders.get(orderid, None)
def get_trade(self, vt_tradeid):
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, key):
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(key, None)
def get_account(self, accountid):
"""
Get latest account data by accountid.
"""
return self.accounts.get(accountid, None)
def get_contract(self, full_symbol):
"""
Get contract data by full_symbol.
"""
return self.contracts.get(full_symbol, None)
def get_all_ticks(self):
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self):
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self):
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self):
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self):
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self):
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, full_symbol: str = ""):
"""
Get all active orders by full_symbol.
If full_symbol is empty, return all active orders.
"""
if not full_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.full_symbol == full_symbol
]
return active_orders
def get_position_holding(self, acc: str, full_symbol: str):
return self.offset_converter.get_position_holding(acc, full_symbol)
def get_strategy_active_orderids(self, strategy_name: str):
oidset = self.strategy_orderid_map[strategy_name]
return oidset
#order, cancel
def send_order(
self,
strategy: StrategyBase,
original_req: OrderRequest,
lock: bool = False
):
"""
Send a new order to server.
"""
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(
original_req, lock)
# Send Orders
orderids = []
for req in req_list:
req.clientID = self.id
req.client_order_id = self.ordercount
self.ordercount += 1
m = Event(type=EventType.ORDER,
data=req,
des=req.api + '.' + req.account,
src=str(self.id)
)
if req.api == "CTP.TD":
m.msg_type = MSG_TYPE.MSG_TYPE_ORDER_CTP
elif req.api == "PAPER.TD":
m.msg_type = MSG_TYPE.MSG_TYPE_ORDER_PAPER
else:
print("error:api not support!")
return []
msg = m.serialize()
print(f'tradeclient {self.id} send msg: {msg}')
self._send_sock.send(msg)
orderids.append(req.client_order_id)
self.offset_converter.update_order_request(req)
# Save relationship between orderid and strategy.
self.orderid_strategy_map[req.client_order_id] = strategy
self.strategy_orderid_map[strategy.strategy_name].add(
req.client_order_id)
return orderids
def cancel_order(self, strategy: StrategyBase, orderid: int):
"""
Cancel existing order by orderid.
"""
order = self.get_order(orderid)
if not order:
self.write_log(f"撤单失败,找不到委托{orderid}", strategy)
return
req = order.create_cancel_request()
m = Event(type=EventType.CANCEL,
data=req,
des=order.api + '.' + order.account,
src=str(self.id),
msgtype=MSG_TYPE.MSG_TYPE_ORDER_ACTION
)
msg = m.serialize()
print(f'tradeclient {self.id} send msg: {msg}')
self._send_sock.send(msg)
def cancel_all(self, strategy: StrategyBase):
"""
Cancel all active orders of a strategy.
"""
orderids = self.strategy_orderid_map[strategy.strategy_name]
if not orderids:
print(strategy.strategy_name, 'has no active order')
return
for orderid in copy(orderids):
print('cancel oid:', orderid)
self.cancel_order(strategy, orderid)
def send_testmsg(self):
m = Event(des='CTP.MD', src=str(self.id),
msgtype=MSG_TYPE.MSG_TYPE_TEST)
msg = m.serialize()
self._send_sock.send(msg)
print(f'tradeclient {self.id} send msg: {msg}')
# start and stop
def start(self, timer=True):
"""
Start the event engine and begin to receive messages through nng, feeding them to the engine
"""
self.event_engine.start()
print('tradeclient started, pid = %d' % os.getpid())
self.__active = True
while self.__active:
try:
msgin = self._recv_sock.recv(flags=0)
msgin = msgin.decode("utf-8")
if msgin is not None and msgin.index('|') > 0:
if msgin[0] == '@':
print('tradeclient(pid = %d) rec @ msg:' %
(self.id), msgin, 'at ', datetime.now())
# '\0' and '\x00' are the same character; together these strip up to two
# trailing NUL bytes from the message.
if msgin[-1] == '\0':
msgin = msgin[:-1]
if msgin[-1] == '\x00':
msgin = msgin[:-1]
m = Event()
m.deserialize(msgin)
self.event_engine.put(m)
except Exception as e:
pass
#print("TradeEngineError {0}".format(str(e.args[0])).encode("utf-8"))
def stop(self):
"""
Stop the receive loop and the event engine
"""
self.__active = False
self.event_engine.stop()
def put(self, event):
"""
Send an event message. TODO: validate the event before sending
"""
#
self._send_sock.send(event.serialize(), flags=1)
def register_handler(self, type_, handler):
"""
register handler/subscriber
"""
# self.event_engine.register(type_,handler)
# handlerList = self._handlers[type_]
# if handler not in handlerList:
# self._handlers[type_].append(handler)
# #handlerList.append(handler)
pass
def unregister_handler(self, type_, handler):
"""
unregister handler/subscriber
"""
# handlerList = self._handlers[type_]
# if handler in handlerList:
# self._handlers.remove(handler)
# if not handlerList:
# del self._handlers[type_]
pass
def write_log(self, msg: str, strategy: StrategyBase = None):
"""
Create engine log event.
"""
# if strategy:
# msg = f"{strategy.strategy_name}: {msg}"
# log = LogData(msg=msg, gateway_name="CtaStrategy")
# event = Event(type=EVENT_CTA_LOG, data=log)
# self.event_engine.put(event)
print(msg)
# -------------------------------- end of public functions -----------------------------#
|
inference_lclcl.py
|
import numpy as np
import matplotlib.pyplot as plt
import PIL as PIL
from os import listdir
from os.path import isfile, join
import tensorflow as tf
import random
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import threading
from queue import Queue
import traceback
from zipfile import ZipFile
import time
import scipy
from PIL import ImageEnhance
from PIL import ImageFilter
import ntpath
import re
import itertools
print_debug = False
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
import keras
# set the modified tf session as backend in keras
my_sess = get_session()
keras.backend.tensorflow_backend.set_session(my_sess)
import keras as K
from keras import backend as KBE
nb_channel = 3
typeimg = '5xrgbimage'
imgW = 75 * 5
imgH = 75
letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0',
'1', '2', '3', '4', '5', '6', '7', '8', '9']
maxTextLen = 5
input_length_for_ctc = 64
O_CTC = 0
def CTCLambdaFunc(args):
y_pred, y_true, input_length, label_length = args
# the O_CTC is critical here since the first couple outputs of the RNN tend to be garbage:
y_pred = y_pred[:, O_CTC:, :]
return KBE.ctc_batch_cost(y_true, y_pred, input_length, label_length)
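# Not used by this inference script (the model is loaded with a dummy lambda
# below); shown only as a hedged sketch of how such a CTC loss function is
# typically attached during training with a Keras Lambda layer. Tensor names
# and shapes are assumptions, not taken from the original training code:
#   loss_out = keras.layers.Lambda(CTCLambdaFunc, output_shape=(1,), name='ctc')(
#       [y_pred, labels, input_length, label_length])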
def LoadImagesFromZip(imgPath, zipFilePath):
zipfile = ZipFile(zipFilePath)
files = [name[1] for name in enumerate(zipfile.namelist()) if re.match(imgPath + r'/\w+\.jpg', name[1]) is not None]
return files, zipfile
def loadData(zip_file_path, subsetPercentage=None):
trainFiles, trainZipFile = LoadImagesFromZip('train', zip_file_path)
valFiles, valZipFile = LoadImagesFromZip('valid', zip_file_path)
testFiles, testZipFile = LoadImagesFromZip('test', zip_file_path)
if 0 == len(testFiles):
print(' * Using images for TEST from zipfile ' + zip_file_path + '. Have them ready in [basedir_images].')
testFiles, testZipFile = LoadImagesFromZip('test_real_all', zip_file_path)
elif subsetPercentage is not None:
indices = random.sample(range(len(trainFiles)), int(subsetPercentage * len(trainFiles)))
trainFiles = [trainFiles[i] for i in sorted(indices)]
indices = random.sample(range(len(valFiles)), int(subsetPercentage * len(valFiles)))
valFiles = [valFiles[i] for i in sorted(indices)]
indices = random.sample(range(len(testFiles)), int(subsetPercentage * len(testFiles)))
testFiles = [testFiles[i] for i in sorted(indices)]
imagecount_train = len(trainFiles)
imagecount_val = len(valFiles)
imagecount_test = len(testFiles)
print('Using ' + str(imagecount_train) + ' files for training, ' + str(imagecount_val) + ' for validation and ' + str(imagecount_test) + ' for testing.')
return trainFiles, trainZipFile, valFiles, valZipFile, testFiles, testZipFile
g_lock = threading.Lock()
def NextSample(n, indexes, currIndex, imgFiles):
if currIndex >= n: # This will never be true in multithread.
currIndex = 0
random.shuffle(indexes)
return imgFiles[indexes[currIndex]], currIndex + 1
def find_hsv(img):
r,g,b = img.split()
Hdat = []
Sdat = []
Vdat = []
import colorsys
for rd,gn,bl in zip(r.getdata(),g.getdata(),b.getdata()) :
h,s,v = colorsys.rgb_to_hsv(rd/255.,gn/255.,bl/255.)
Hdat.append(int(h*255.))
Sdat.append(int(s*255.))
Vdat.append(int(v*255.))
return [np.asarray(Hdat),np.asarray(Sdat),np.asarray(Vdat)]
def create5xLargeRGBImage(img_file, imgW, imgH):
img = PIL.Image.open(img_file).resize((imgW // 5, imgH), PIL.Image.ANTIALIAS)
h, s, v = find_hsv(img)
mv = 0.85 * v.mean()
img = img.convert("L")
w1 = img.point(lambda x: 32 if x < mv else 128)
w2 = ImageEnhance.Brightness(w1).enhance(0.33)
w3 = ImageEnhance.Brightness(w1).enhance(3)
w4 = ImageEnhance.Contrast(w1).enhance(2 * 3)
w5 = ImageEnhance.Sharpness(w1).enhance(2 * 3)
new_im = PIL.Image.new('L', (imgW, imgH))
x_offset = 0
for im in [w1, w2, w3, w4, w5]:
new_im.paste(im, (x_offset, 0))
x_offset += im.size[0]
new_im = new_im.convert('RGB')
assert isinstance(new_im, PIL.Image.Image)
assert new_im.width == imgW, '(~W) ' + str(new_im.width) + ' == ' + str(imgW)
assert new_im.height == imgH, '(~H) ' + str(new_im.height) + ' == ' + str(imgH)
return new_im
def ReadConvertImage(imgPath, basedir_images, imgW, imgH, conv, nb_channel):
img_file = basedir_images + imgPath
# Read the specified file.
if conv == 'RGB':
assert 3 == nb_channel, '3!=' + str(nb_channel)
img = scipy.misc.imread(name=img_file, mode='RGB')
img = scipy.misc.imresize(img, (imgH, imgW), interp='nearest')
img = np.moveaxis(img, 1, 0)
img = img.astype(np.float32)
img /= 255.0
elif conv == '1ximage':
assert 1 == nb_channel, '1!=' + str(nb_channel)
img = create1xImage(img_file, imgW, imgH)
img = np.asarray(img)
elif conv == '1xrgbimage':
assert 3 == nb_channel, '3!=' + str(nb_channel)
img = create1xRGBImage(img_file, imgW, imgH)
img = np.asarray(img)
elif conv == '5xrgbimage':
assert 3 == nb_channel, '3!=' + str(nb_channel)
img = create5xLargeRGBImage(img_file, imgW, imgH)
img = np.asarray(img)
img = np.moveaxis(img, 1, 0)
elif conv == '5xLimage':
assert 1 == nb_channel, '1!=' + str(nb_channel)
img = create5xLargeImage(img_file, imgW, imgH)
img = np.asarray(img)
else:
assert False
assert imgW == img.shape[0], '(W)' + str(imgW) + ' == ' + str(img.shape[0]) + ' img.shape=' + str(img.shape)
assert imgH == img.shape[1], '(H)' + str(imgH) + ' == ' + str(img.shape[1]) + ' img.shape=' + str(img.shape)
text = ntpath.basename(os.path.splitext(img_file)[0])
if '_' in text:
text = text.split('_')[0]
return img, text
def TextToLabels(text,letters):
return list(map(lambda x: letters.index(x), text))
def NextBatchAsync(n, batchSize, imgPaths, basedir_images, imgW, imgH, nb_channel, maxTextLen, typeimg,
input_length_for_ctc, O_CTC, letters, nb_workers):
qData = Queue()
qCurrIndex = Queue()
qCurrIndex.put(0)
indexes = list(range(n))
random.shuffle(indexes)
def worker():
while True:
start = time.clock()
try:
if qData.qsize() > 32:
time.sleep(1)
continue
assert not KBE.image_data_format() == 'channels_first'
X_data = np.ones([batchSize, imgW, imgH, nb_channel])
Y_data = np.ones([batchSize, maxTextLen])
input_length = np.ones((batchSize, 1)) * (input_length_for_ctc - O_CTC)
label_length = np.zeros((batchSize, 1))
startIndex = qCurrIndex.get()
if startIndex >= n or startIndex + batchSize >= n:
startIndex = 0
random.shuffle(indexes)
qCurrIndex.put(startIndex + batchSize)
for i in range(batchSize):
imgFile, startIndex = NextSample(n, indexes, startIndex, imgPaths)
img, text = ReadConvertImage(imgFile, basedir_images, imgW, imgH, typeimg, nb_channel)
if typeimg == 'RGB' or typeimg == '1xrgbimage' or typeimg == '5xrgbimage':
assert 3 == nb_channel
X_data[i] = img
else:
assert 1 == nb_channel
X_data[i] = np.expand_dims(img, -1)
Y_data[i] = TextToLabels(text, letters)
label_length[i] = len(text)
assert maxTextLen == len(Y_data[i])
assert 5 == len(text)
inputs = {
'the_input': X_data,
'the_labels': Y_data,
'input_length': input_length,
'label_length': label_length,
}
outputs = {'ctc': np.zeros([batchSize])}
qData.put((inputs, outputs))
except Exception:
with g_lock:
print('[' + threading.current_thread().name + '] Error in generator.')
with g_lock:
traceback.print_exc()
pass
end = time.clock()
with g_lock:
if print_debug:
print('[' + threading.current_thread().name + ']Preparing batch of ' + str(batchSize) + ' elements in ' + str((end - start)) + ' seconds.')
# Create the thread pool.
for i in range(nb_workers):
t = threading.Thread(target=worker)
t.daemon = False
t.start()
while True:
start = time.clock()
inputs, outputs = qData.get()
end = time.clock()
with g_lock:
if print_debug:
print('[' + threading.current_thread().name + ']Getting batch of ' + str(batchSize) + ' elements in ' + str((end - start)))
yield (inputs, outputs)
def decode_batch(out, O_CTC, letters):
ret = []
for j in range(out.shape[0]):
out_best = list(np.argmax(out[j, O_CTC:], 1))
out_best = [k for k, g in itertools.groupby(out_best)]
outstr = ''
for c in out_best:
if c < len(letters):
outstr += letters[c]
ret.append(outstr)
return ret
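# Hedged toy example of the greedy CTC decoding above (the argmax indices are
# made up): with letters as defined earlier (36 classes, so 36 is the blank),
# a per-timestep argmax sequence [0, 0, 36, 1, 1, 36, 2] collapses repeated
# labels, drops the blank index and yields the string "ABC".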
basedir_images = 'F:/AMATEUR/LCLCL_images_pour_test/'
trainFiles, trainZipFile, valFiles, valZipFile, testFiles, testZipFile = loadData(zip_file_path='F:/AMATEUR/LCLCL_images_pour_test/LCLCL__real_all.zip')
model_to_reload = 'F:/AMATEUR/lclcl_models/gen20_2000000_DensenNet201_5xrgbimage_375x75x3_weights.18-0.0457.hdf5'
custom_objects = {'<lambda>': lambda y_true, y_pred: y_pred}
start = time.clock()
trained = keras.models.load_model(model_to_reload, custom_objects=custom_objects)
end = time.clock()
print(end-start)
batch_size_test = 8
imagecount_test = len(testFiles) # Number of images to process.
name_in='the_input'
name_out='softmax3'
net_inp = trained.get_layer(name=name_in).input
net_out = trained.get_layer(name=name_out).output
for inp_value, _ in NextBatchAsync(imagecount_test, batch_size_test, testFiles, basedir_images, imgW, imgH, nb_channel, maxTextLen, typeimg, input_length_for_ctc, O_CTC,letters,nb_workers=4):
start = time.clock()
bs = inp_value['the_input'].shape[0]
assert bs == batch_size_test
X_data = inp_value['the_input']
net_out_value = my_sess.run(net_out, feed_dict={net_inp:X_data})
Y_pred_text = decode_batch(net_out_value,O_CTC,letters)
Y_true = inp_value['the_labels']
end = time.clock()
print(end - start)
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server; this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
from __future__ import absolute_import, with_statement, print_function, unicode_literals
import copy
import ctypes
import functools
import os
import re
import sys
import time
import signal
import stat
import logging
import collections
import multiprocessing
import threading
import salt.serializers.msgpack
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=import-error,no-name-in-module,redefined-builtin
import salt.ext.tornado.gen # pylint: disable=F0401
# Import salt libs
import salt.crypt
import salt.cli.batch_async
import salt.client
import salt.client.ssh.client
import salt.exceptions
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.key
import salt.acl
import salt.engines
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.transport.server
import salt.log.setup
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.transport import iter_transport_opts
from salt.utils.debug import (
enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
# Import halite libs
try:
import halite # pylint: disable=import-error
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
from salt.ext.tornado.stack_context import StackContext
from salt.utils.ctx import RequestContext
log = logging.getLogger(__name__)
class SMaster(object):
'''
Create a simple salt-master, this will generate the top-level master
'''
secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options dictionary
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state['opts']
self.master_key = state['master_key']
self.key = state['key']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
'master_key': self.master_key,
'key': self.key,
'secrets': SMaster.secrets}
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
last_git_pillar_update = last
git_pillar_update_interval = self.opts.get('git_pillar_update_interval', 0)
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
if (now - last_git_pillar_update) >= git_pillar_update_interval:
last_git_pillar_update = now
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
if six.PY2:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
else:
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''
Rotate the AES key when a rotation is due
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc: # pylint: disable=broad-except
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc: # pylint: disable=broad-except
log.error('Exception %s occurred in scheduled job', exc)
self.schedule.cleanup_subprocesses()
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
old_present.clear()
old_present.update(present)
class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state['opts'],
log_queue=state['log_queue'],
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue,
}
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
'No update function for the %s fileserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
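# For illustration only (backend names and intervals below are hypothetical),
# the self.buckets structure built above maps each interval to an OrderedDict
# keyed by (backend, update_func), with a list of remote ids for backends that
# support per-remote intervals and None otherwise:
#   {60:  OrderedDict({('gitfs', <gitfs update func>): ['remote1', 'remote2']}),
#    120: OrderedDict({('roots', <roots update func>): None})}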
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc: # pylint: disable=broad-except
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options
'''
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... macOS reports RLIM_INFINITY as
# hard limit, but raising to anything above soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: %s/%s',
mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, %s, is higher '
'than the highest value the user running salt is allowed to '
'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to %s', mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: %s/%s',
mof_s, mof_h
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to %s. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.', mof_c
)
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
'Cannot change to root directory ({0})'.format(err)
)
if self.opts.get('fileserver_verify_config', True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
if self.opts.get('git_pillar_verify_config', True):
try:
git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
except TypeError:
git_pillars = []
critical_errors.append(
'Invalid ext_pillar configuration. It is likely that the '
'external pillar type was not specified for one or more '
'external pillars.'
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets['aes'] = {
'secret': multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
)
),
'reload': salt.crypt.Crypticle.generate_key_string
}
log.info('Creating master process manager')
# Since there are children having their own ProcessManager we should wait for kill more time.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info('Creating master publisher process')
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager, kwargs={'log_queue': log_queue})
pub_channels.append(chan)
log.info('Creating master event publisher process')
self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
if isinstance(self.opts['engines'], list):
rine = False
for item in self.opts['engines']:
if 'reactor' in item:
rine = True
break
if not rine:
self.opts['engines'].append({'reactor': {}})
else:
if 'reactor' not in self.opts['engines']:
log.info('Enabling the reactor engine')
self.opts['engines']['reactor'] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info('Creating master maintenance process')
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: %s', proc)
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception: # pylint: disable=broad-except
log.error('Error creating ext_processes process: %s', proc)
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
self.process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = log_queue
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name='ReqServer')
self.process_manager.add_process(
FileserverUpdate,
args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts['discovery']:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts['discovery']['port'],
listen_ip=self.opts['interface'],
answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
else:
log.error('Unable to load SSDP: asynchronous IO is not available.')
if sys.version_info.major == 2:
log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
class Halite(salt.utils.process.SignalHandlingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
Fire up halite!
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class ReqServer(salt.utils.process.SignalHandlingProcess):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
:param dict opts: The salt options dictionary
:key dict: The user starting the server and the AES key
:mkey dict: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super(ReqServer, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
# Wait for kill should be less than the parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, 'process_manager'):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
class MWorker(salt.utils.process.SignalHandlingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@salt.ext.tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise salt.ext.tornado.gen.Return(ret)
def _post_stats(self, start, cmd):
'''
Calculate the master stats and fire events with stat info
'''
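# A fired stats event carries roughly this payload (numbers invented for
# illustration):
#     {'time': 60.1, 'worker': 'MWorker-3',
#      'stats': {'publish': {'mean': 0.02, 'runs': 15}}}
# tagged with the worker name via tagify() as done below.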
end = time.time()
duration = end - start
self.stats[cmd]['mean'] = (self.stats[cmd]['mean'] * (self.stats[cmd]['runs'] - 1) + duration) / self.stats[cmd]['runs']
if end - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end - self.stat_clock, 'worker': self.name, 'stats': self.stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
self.stat_clock = end
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
method = self.clear_funcs.get_method(cmd)
if not method:
return {}, {'fun': 'send_clear'}
if self.opts['master_stats']:
start = time.time()
self.stats[cmd]['runs'] += 1
ret = method(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
self._post_stats(start, cmd)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param dict data: The payload sent over the AES channel
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
method = self.aes_funcs.get_method(cmd)
if not method:
return {}, {'fun': 'send'}
if self.opts['master_stats']:
start = time.time()
self.stats[cmd]['runs'] += 1
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
self._post_stats(start, cmd)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
class TransportMethods(object):
'''
Expose methods to the transport layer, methods with their names found in
the class attribute 'expose_methods' will be exposed to the transport layer
via 'get_method'.
'''
expose_methods = ()
def get_method(self, name):
'''
Get a method which should be exposed to the transport layer
'''
if name in self.expose_methods:
try:
return getattr(self, name)
except AttributeError:
log.error("Expose method not found: %s", name)
else:
log.error("Requested method not exposed: %s", name)
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(TransportMethods):
'''
Set up functions that are available when the load is encrypted with AES
'''
expose_methods = (
'verify_minion', '_master_tops', '_ext_nodes', '_master_opts',
'_mine_get', '_mine', '_mine_delete', '_mine_flush', '_file_recv',
'_pillar', '_minion_event', '_handle_minion_event', '_return',
'_syndic_return', 'minion_runner', 'pub_ret', 'minion_pub',
'minion_publish', 'revoke_auth', 'run_func', '_serve_file',
'_file_find', '_file_hash', '_file_find_and_stat', '_file_list',
'_file_list_emptydirs', '_dir_list', '_symlink_list', '_file_envs',
)
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
# Bail out here; otherwise 'pub' would be undefined below
return False
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
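# Illustrative peer configuration (assumed for the example, not taken from any
# real deployment):
#
#     peer:
#       'web*':
#         - test.*
#         - network.interfaces
#
# With that config, a load from minion id 'web01' requesting fun 'test.ping'
# collects ['test.*', 'network.interfaces'] into perms below and passes
# auth_check, while 'cmd.run' from the same minion is rejected.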
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: dict or bool
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
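# Illustrative shape of the reply when the minion asked with 'env_only'
# (values invented for the example):
#     {'file_roots': {'base': []}, 'top_file_merging_strategy': 'merge',
#      'env_order': [], 'default_top': 'base'}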
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# has been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_messages_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: yaml
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: yaml
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion pay
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: yaml
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: yaml
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param str func: The name of the function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception: # pylint: disable=broad-except
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
class ClearFuncs(TransportMethods):
'''
Set up functions that are safe to execute when commands are sent to the
master without encryption and authentication
'''
# These methods will be exposed to the transport layer by
# MWorker._handle_clear
expose_methods = (
'ping', 'publish', 'publish_batch', 'get_token', 'mk_token', 'wheel', 'runner',
)
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc: # pylint: disable=broad-except
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc: # pylint: disable=broad-except
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish_batch(self, clear_load, minions, missing):
batch_load = {}
batch_load.update(clear_load)
batch = salt.cli.batch_async.BatchAsync(
self.local.opts,
functools.partial(self._prep_jid, clear_load, {}),
batch_load
)
ioloop = salt.ext.tornado.ioloop.IOLoop.current()
ioloop.add_callback(batch.start)
return {
'enc': 'clear',
'load': {
'jid': batch.batch_jid,
'minions': minions,
'missing': missing
}
}
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
if extra.get('batch', None):
return self.publish_batch(clear_load, minions, missing)
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
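# Three shapes of clear_load are distinguished below (examples are hypothetical):
#   {'token': '<session token>', ...}                       -> 'token' auth
#   {'eauth': 'pam', 'username': 'fred', 'password': '...'} -> 'eauth' auth
#   anything else falls back to 'user' auth, checked against self.key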
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception: # pylint: disable=broad-except
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception: # pylint: disable=broad-except
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
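# For a plain "salt '*' test.ping" the base load built below looks roughly like
# (jid value illustrative):
#     {'fun': 'test.ping', 'arg': [], 'tgt': '*',
#      'jid': '20190101120000000000', 'ret': ''}
# before any of the optional keys further down are added.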
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
|
mission_commander.py
|
'''
Copyright 2019, David Pierce Walker-Howell, All rights reserved
Author: David Pierce Walker-Howell<piercedhowell@gmail.com>
Last Modified 08/05/2019
Description: The mission commander executes a mission based on a mission.json file
when the vehicle is in mission mode. Missions are started by pressing
the auto button on the rear of perseverance. Missions are a queue of tasks
to complete.
'''
import sys
import os
PARAM_PATH = os.path.join("..", "Params")
sys.path.append(PARAM_PATH)
MECHOS_CONFIG_FILE_PATH = os.path.join(PARAM_PATH, "mechos_network_configs.txt")
from mechos_network_configs import MechOS_Network_Configs
MESSAGE_TYPES_PATH = os.path.join("..", "..", "..", "Message_Types")
sys.path.append(MESSAGE_TYPES_PATH)
from neural_network_message import Neural_Network_Message
from MechOS import mechos
from MechOS.simple_messages.int import Int
from MechOS.simple_messages.bool import Bool
from MechOS.simple_messages.float_array import Float_Array
import threading
import time
import json
import serial
import struct
#Import all the tasks
from drive_functions import Drive_Functions
from waypoint_task import Waypoint_Task
from gate_no_vision_task import Gate_No_Vision_Task
from initial_dive_task import Initial_Dive_Task
class Mission_Commander(threading.Thread):
'''
Mission Commander is the main controller of the sub's autonomy to execute missions.
The mission commander will execute the tasks of a mission from a mission file of type
'.json'.
'''
def __init__(self):
'''
Initialize the mission commander. The mission .json file is read from the
parameter server via _update_mission_info_callback.
Parameters:
N/A
Returns:
N/A
'''
threading.Thread.__init__(self)
self.mission_file = None
#Initialize the drive functions
self.drive_functions = Drive_Functions()
#Get the mechos network parameters
configs = MechOS_Network_Configs(MECHOS_CONFIG_FILE_PATH)._get_network_parameters()
#Connect to parameters server
self.param_serv = mechos.Parameter_Server_Client(configs["param_ip"], configs["param_port"])
self.param_serv.use_parameter_database(configs["param_server_path"])
#MechOS node to connect the mission commander to the mechos network
self.mission_commander_node = mechos.Node("MISSION_COMMANDER", '192.168.1.14', '192.168.1.14')
#subscriber to listen if the movement mode is set to be autonomous mission mode
self.movement_mode_subscriber = self.mission_commander_node.create_subscriber("MOVEMENT_MODE", Int(), self._update_movement_mode_callback, protocol="tcp")
#subscriber to listen if the mission information has changed.
self.update_mission_info_subscriber = self.mission_commander_node.create_subscriber("MISSON_SELECT", Bool(), self._update_mission_info_callback, protocol="tcp")
#subscriber to listen if neural network data is available
self.neural_network_subscriber = self.mission_commander_node.create_subscriber("NEURAL_NET", Neural_Network_Message(), self._update_neural_net_callback, protocol="tcp")
self.neural_net_data = [0, 0, 0, 0, 0, 0]
#Publisher to be able to kill the sub within the mission
self.kill_sub_publisher = self.mission_commander_node.create_publisher("KILL_SUB", Bool(), protocol="tcp")
#Publisher to zero the position of the sub.
self.zero_position_publisher = self.mission_commander_node.create_publisher("ZERO_POSITION", Bool(), protocol="tcp")
#Set up serial com to read the autonomous button
com_port = self.param_serv.get_param("COM_Ports/auto")
self.auto_serial = serial.Serial(com_port, 9600)
#Set up a thread to listen to request from the GUI
self.command_listener_thread = threading.Thread(target=self._command_listener)
self.command_listener_thread.daemon = True
self.command_listener_thread_run = True
self.command_listener_thread.start()
self.mission_tasks = [] #A list of the mission tasks
self.mission_data = None #The .json file structure loaded into python dictionary
self.run_thread = True
self.daemon = True
self.mission_mode = False #If true, then the subs navigation system is ready for missions
self.mission_live = False #Mission live corresponds to the autonomous buttons state.
#Variable to keep the current sensor data(position) of the sub.
self.current_position = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#load the mission data
self._update_mission_info_callback(None)
def _update_mission_info_callback(self, misc):
'''
If the update mission info button is pressed in the mission planner widget,
update the mission info here. Note that the mission being live should go to
false.
Parameters:
misc: Nothing used.
Returns:
N/A
'''
#Get the new mission file from the parameter server.
self.mission_file = self.param_serv.get_param("Missions/mission_file")
self.mission_live = False
print("[INFO]: New Mission file set as %s" % self.mission_file)
#Parse the mission file
self.parse_mission()
def _update_movement_mode_callback(self, movement_mode):
'''
The callback function to select which navigation controller mode is being used.
If it is set to 3, then the navigation controller is ready for autonomous mode.
Parameters:
movement_mode: Raw byte of the mode.
Returns:
N/A
'''
if(movement_mode == 3):
print("[INFO]: Mission Commander Ready to Run Missions. Sub Initially Killed")
#Initially have the sub killed when switched to mission commander mode
self.kill_sub_publisher.publish(True)
self.mission_mode = True
else:
if(self.mission_mode == True):
print("[INFO]: Exited Mission Command Mode.")
self.mission_mode = False
self.mission_live = False
self.drive_functions.drive_functions_enabled = False
def _update_neural_net_callback(self, neural_net_data):
self.neural_net_data = neural_net_data
print(self.neural_net_data)
def _command_listener(self):
'''
The thread to run update requests from the GUI to tell the mission commander
when it is ready to run missions and what missions to do.
Parameters:
N/A
Returns:
N/A
'''
while self.command_listener_thread_run:
try:
#Receive commands from the GUI and/or Mission Commander
self.mission_commander_node.spin_once()
if(self.auto_serial.in_waiting):
auto_pressed = (self.auto_serial.read(13)).decode()
self.auto_serial.read(2) #Read the excess two bytes
print(auto_pressed)
if(auto_pressed == "Auto Status:1" and self.mission_mode):
print("[INFO]: Mission Now Live")
self.mission_live = True
self.drive_functions.drive_functions_enabled = True
elif(auto_pressed == "Auto Status:0" and self.mission_mode):
print("[INFO]: Mission is no longer Live.")
self.mission_live = False
self.drive_functions.drive_functions_enabled = False
except Exception as e:
print("[ERROR]: Could not properly recieved messages in command listener. Error:", e)
time.sleep(0.001)
def parse_mission(self):
'''
Parse the mission .json file and generate the code for each task. Save
the individual task in the mission_tasks list attribute.
Parameters:
N/A
Returns:
N/A
'''
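#Illustrative mission file layout (assumed for documentation purposes, not an
#actual mission shipped with the code). Each top-level key is one task and its
#"type" selects the task class constructed below, e.g.:
#    {
#        "dive": {"type": "Initial_Dive", "name": "Dive", "depth": 2.0},
#        "gate": {"type": "Gate_No_Vision", "name": "Gate"}
#    }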
self.mission_tasks = [] #Reset the mission tasks
with open(self.mission_file, 'r') as f:
self.mission_data = json.load(f)
#Count the number of tasks in the mission
self.num_tasks = len(self.mission_data)
print("[INFO]: Parsing Mission. Number of tasks for mission is", self.num_tasks, ".")
task_keys = self.mission_data.keys()
for task_index, task in enumerate(task_keys):
#Get the task type and name
task_type = self.mission_data[task]["type"]
if(task_type == "Initial_Dive"):
initial_dive_task = Initial_Dive_Task(self.mission_data[task], self.drive_functions)
self.mission_tasks.append(initial_dive_task)
#generate waypoint task
elif(task_type == "Waypoint"):
waypoint_task = Waypoint_Task(self.mission_data[task], self.drive_functions)
self.mission_tasks.append(waypoint_task)
#Generate Gate with no vision task
elif(task_type == "Gate_No_Vision"):
gate_no_vision = Gate_No_Vision_Task(self.mission_data[task], self.drive_functions)
self.mission_tasks.append(gate_no_vision)
def run(self):
'''
Run the mission tasks sequentially.
Parameters:
N/A
Returns:
N/A
'''
while(self.run_thread):
try:
#If in Mission mode, listen to see if the autonomous mode button is
#pressed.
if(self.mission_mode and self.mission_live):
#self.mission_live = True
print("[INFO]: Starting Mission")
#When mission is live, run the mission
#Unkill the sub
self.kill_sub_publisher.publish(False)
#Zero position of the sensors
self.zero_position_publisher.publish(True)
time.sleep(0.1) #wait for the message to make it through
#Iterate through each task in the mission and run them
for task_id, task in enumerate(self.mission_tasks):
if((self.mission_live == False) or (self.mission_mode == False)):
print("[WARNING]: Exiting mission because mission live status or mission mode status changed.")
break
print("[INFO]: Starting Task %s: %s. Mission task %d/%d" %(task.type, task.name, task_id + 1, self.num_tasks))
task_success = task.run()
if(task_success):
print("[INFO]: Successfully completed task %s." % task.name)
continue
else:
print("[INFO]: Failed to complete task %s." % task.name)
print("[INFO]: Finished Mission. Killing the sub.")
self.mission_live = False
#Kill the sub.
self.kill_sub_publisher.publish(True)
except:
print("[ERROR]: Encountered an Error in Mission Commander. Error:", sys.exc_info()[0])
raise
if __name__ == "__main__":
mission_commander = Mission_Commander()
mission_commander.run()
|
twitter.py
|
import threading
import subprocess
from datetime import datetime
import json
from os import listdir
from os.path import isfile, join
from pymongo import MongoClient
from flask import jsonify
from flask import Blueprint
from flask import flash
from flask import g
from flask import render_template
from flask import request
from flask import session
from flaskr.db import get_db
from flaskr.auth import login_required
bp = Blueprint("twitter", __name__, url_prefix="/twitter")
def run_tweet_collect(keywords: str, start_date: str, start_time: str, duration: str, directory: str, data_filename: str, summary_filename: str) -> None:
"""
Starts a twitter collection with the given parameters
and restarts the collection if it gets interrupted
Parameters:
-----------
keywords: List of keywords on which tweets are to be collected, separated by ' OR '
start_date: The start date for the collection
start_time: The start time for the collection
duration: Duration in minutes for which tweets are to be collected
directory: Full path to store the data and summary files
data_filename: Filename for the Data file
summary_filename: Filename for the Summary file
"""
# MODIFIED ON 09/30/2020
# Converting start_date into a datetime object
if start_date == '':
start_date = datetime.fromisoformat(str(datetime.now().date()))
else:
start_date = datetime.fromisoformat(start_date)
# Setting start_time, faillog and exit_code
start_time = datetime(start_date.year, start_date.month, start_date.day, int(start_time.split(':')[0]), int(start_time.split(':')[1]))
faillog_filename = directory + 'FailLog.txt'
exit_code = None
while exit_code != 0:
with open(faillog_filename, 'a+') as faillog:
if exit_code is None:
# Starting the collection for the first time
faillog.write('Collection with Keywords: ' + keywords + ' started on: ' + str(start_time.date()) + '\n')
faillog.write('Start: ' + str(start_time.time()) + '\n')
else:
# Restarting the collection after an interruption
faillog.write('Restart: ' + str(datetime.now().time()))
# Calling RevisedTweetCollect.py as a subprocess
process = subprocess.Popen(['python', 'flaskr/static/collect/RevisedTweetCollect.py', keywords, directory + data_filename, directory + summary_filename, 'flaskr/static/KeySet1.txt', duration, str(start_time.hour), str(start_time.minute), '--date', str(start_time.month), str(start_time.day), str(start_time.year)])
# Wait for the subprocess to finish
process.wait()
exit_code = process.returncode
with open(faillog_filename, 'a+') as faillog:
if exit_code == 0:
# Completed collection
faillog.write('End: ' + str(datetime.now().time()) + '\n\n')
else:
# Collection failed
faillog.write('Fail: ' + str(datetime.now().time()) + '\n')
"""
OLDER CODE
# When you want to end the collection
start_time = datetime(datetime.now().year, datetime.now().month, datetime.now().day, int(start_time.split(':')[0]), int(start_time.split(':')[1]))
end_time = start_time + timedelta(minutes=int(duration))
while (datetime.now() <= end_time):
# Start Tweet Collection if not running
if not is_running:
with open('C:/Users/bncmo/Downloads/FailLog.txt', 'a+') as error_file:
error_file.write(str(datetime.now()) + '\n')
pid = subprocess.Popen(['python', 'flaskr/static/collect/RevisedTweetCollect.py', keywords, file_name + '.txt', summary_file_name + '.txt', 'flaskr\static\KeySet1.txt', duration, str(start_time.hour), str(start_time.minute)]).pid
search_pid = 'pid eq ' + str(pid)
# Check if the Tweet Collection is running
process = subprocess.Popen(['tasklist', '/fi', search_pid, '/nh'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
is_running = str(pid) in str(stdout)
time.sleep(10)
"""
def run_query(collection_name, process_name):
"""
Runs the requested Analysis query on MongoDb
Parameters:
-----------
collection_name: name of the collection in MongoDb database
process_name: name of the analysis to be run
"""
location_reference = {
'US': 'United States',
'MX': 'Mexico',
'AR': 'Argentina',
'BR': 'Brazil',
'CA': 'Canada',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'ES': 'Spain',
'FR': 'France',
'GB': 'Great Britain',
'IN': 'India',
'IT': 'Italy',
'CO': 'Colombia',
'AU': 'Australia',
'JP': 'Japan',
'ID': 'Indonesia'
}
language_reference = {
'en': 'English',
'es': 'Spanish',
'fr': 'French',
'it': 'Italian',
'ja': 'Japanese',
'pt': 'Portuguese',
'th': 'Thai',
'und': 'Undefined',
'tr': 'Turkish',
'ca': 'Catalan',
'in': 'Indonesian'
}
# Mongo Connection
client = MongoClient(port=27017)
db = client['Coronavirus']
collection = db[collection_name]
# Language analysis query
if process_name == 'Language':
data = collection.aggregate([
{
'$group': {
'_id': "$lang",
"count": { '$sum': 1 }
}
},
{
'$sort': { "count": -1 }
}
])
language_data = list(data)
# Transforming the query result into a 2D array as required by Google Charts
data = []
data.append(['Language', 'Count'])
for datum in language_data:
if datum['_id'] in language_reference:
data.append([language_reference[datum['_id']], datum['count']])
# data[language_reference[datum['_id']]] = datum['count']
else:
data.append([datum['_id'], datum['count']])
# data[datum['_id']] = datum['count']
# Location analysis query
else:
data = collection.aggregate([
{
'$group': {
'_id': "$place.country_code",
"count": { '$sum': 1 }
}
},
{
'$sort': { 'count': -1 }
}
])
location_data = list(data)
# Transforming the query result into a 2D array as required by Google Charts
data = []
data.append(['Country', 'Count'])
for datum in location_data:
if datum['_id'] is not None:
if datum['_id'] in location_reference:
data.append([location_reference[datum['_id']], datum['count']])
# data[location_reference[datum['_id']]] = datum['count']
else:
data.append([datum['_id'], datum['count']])
# data[datum['_id']] = datum['count']
return data
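# Example of the 2D array run_query() hands to Google Charts (counts invented):
# [['Language', 'Count'], ['English', 1200], ['Spanish', 450], ['Undefined', 30]]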
def keyword_analysis(collection_name):
"""
Runs the Keyword analysis query on MongoDb
Parameters
-----------------
collection_name: name of the collection in MongoDb database
"""
# Mongo connection
client = MongoClient(port=27017)
db = client['Coronavirus']
collection = db[collection_name]
# Keywords analysis query for 'Coronavirus'
data = collection.aggregate([
{
'$match': { 'filter_words': 'coronavirus' }
},
{
'$group': {
'_id': {
'hour': { '$hour': '$time' },
'month': { '$month': '$time' },
'day': { '$dayOfMonth': '$time' },
'year': { '$year': '$time' }
},
'count': { '$sum': 1 }
}
}
])
coronavirus_data = list(data)
# Keywords analysis query for 'covid-19'
data = collection.aggregate([
{
'$match': { 'filter_words': 'covid-19' }
},
{
'$group': {
'_id': {
'hour': { '$hour': '$time' },
'month': { '$month': '$time' },
'day': { '$dayOfMonth': '$time' },
'year': { '$year': '$time' }
},
'count': { '$sum': 1 }
}
}
])
covid_data = list(data)
# Keywords analysis query for 'covid19'
data = collection.aggregate([
{
'$match': { 'filter_words': 'covid19' }
},
{
'$group': {
'_id': {
'hour': { '$hour': '$time' },
'month': { '$month': '$time' },
'day': { '$dayOfMonth': '$time' },
'year': { '$year': '$time' }
},
'count': { '$sum': 1 }
}
}
])
cov_data = list(data)
# Transforming the query results into a 2D array as required by Google Charts
formatted_data = []
formatted_data.append(['Time', 'Coronavirus', 'Covid-19', 'Covid19'])
for index in range(0, len(coronavirus_data)):
corona = coronavirus_data[index]
covid = covid_data[index]
cov = cov_data[index]
date = corona['_id']
time = str(date['year']) + '/' + str(date['month']) + '/' + str(date['day']) + ' ' + str(date['hour']) + ':00'
formatted_data.append([time, corona['count'], covid['count'], cov['count']])
return formatted_data
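# Note: the index-based pairing above assumes the three aggregations return the same hour
# buckets in the same order. A defensive variant (sketch only, not used by the routes below)
# would merge the three result sets on the grouped hour key instead:
#
#   def merge_by_hour(corona, covid, cov):
#       def key(d):
#           i = d['_id']
#           return (i['year'], i['month'], i['day'], i['hour'])
#       counts = {}
#       for name, rows in (('corona', corona), ('covid', covid), ('cov', cov)):
#           for row in rows:
#               counts.setdefault(key(row), {})[name] = row['count']
#       merged = [['Time', 'Coronavirus', 'Covid-19', 'Covid19']]
#       for k in sorted(counts):
#           c = counts[k]
#           merged.append(['%d/%d/%d %d:00' % k, c.get('corona', 0), c.get('covid', 0), c.get('cov', 0)])
#       return merged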
def mongo_import(directory, database_name, collection_name):
"""
Imports all the files from the given directory to the specified
MongoDb database and collection
Parameters
----------
directory: full directory path to the data files
database: name of the MongoDb database to import data to
collection: name of the MongoDb collection to import data to
"""
files = [join(directory, filename) for filename in listdir(directory) if isfile(join(directory, filename))]
for file in files:
import_process = subprocess.Popen(['mongoimport', '--db', database_name, '--collection', collection_name, '--file', file])
import_process.wait()
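# Example (directory, database and collection names are placeholders):
#   mongo_import('C:/data/tweets/Mar_01', 'Coronavirus', 'Mar_01')
# Each file is handed to the mongoimport CLI in turn; import_process.wait() keeps the imports
# sequential so only one mongoimport process runs at a time.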
@bp.before_app_request
def load_logged_in_user():
"""If a user id is stored in the session, load the user object from
the database into ``g.user``"""
user_id = session.get('user_id')
if user_id is None:
g.user = None
else:
g.user = (
get_db().execute('SELECT * FROM user WHERE id = ?', (user_id,)).fetchone()
)
@bp.route('/collect', methods=['GET', 'POST'])
# @login_required
def collect():
"""Start a New Collection
Get input from user and start the twitter collection
"""
if request.method == 'POST':
request_data = json.loads(request.data)
keywords = request_data['keywords']
start_date = request_data['startDate']
start_time = request_data['startTime']
duration = request_data['duration']
directory = request_data['directory']
file_name = request_data['filename']
summary_file_name = request_data['summaryFilename']
error = None
if start_time in [None, '', ' ']:
error = 'Enter a valid start time'
elif duration in [None, '', ' ']:
error = 'Enter a valid duration'
elif start_date != '' and datetime.fromisoformat(start_date).date() < datetime.now().date():
error = 'Enter a valid date'
if error is None:
start_collection = threading.Thread(name='start_collection', target=run_tweet_collect, args=(keywords, start_date, start_time, duration, directory, file_name, summary_file_name), daemon=True)
start_collection.start()
return 'Success'
# run_tweet_collect(keywords, start_time, duration, file_name, summary_file_name)
else:
return error
return render_template('twitter/collect.html')
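# Example JSON body the /collect POST expects (all values are illustrative):
#   {
#     "keywords": "coronavirus,covid-19,covid19",
#     "startDate": "2020-03-01",
#     "startTime": "14:00",
#     "duration": "60",
#     "directory": "C:/data/tweets",
#     "filename": "Mar_01",
#     "summaryFilename": "Mar_01_summary"
#   }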
@bp.route('/process', methods=['GET', 'POST'])
# @login_required
def process():
"""Run analysis on tweets
Get input from user to process tweets
"""
if request.method == 'POST':
request_data = json.loads(request.data)
collection_name = request_data['analyticsCollection']
process_name = request_data['analysis']
title = collection_name + ' ' + process_name + ' Analysis'
if 'Location' in title:
title += ' (Ignoring tweets without location data)'
if process_name == 'Keyword':
data = keyword_analysis(collection_name)
column_names = ['Time', 'Coronavirus', 'Covid-19', 'Covid19']
options = {'width': 950, 'height': 500}
type = 'LineChart'
# return render_template('twitter/process.html', collection_list = ['practice', 'Mar_01', 'test'], process_list = ['Language', 'Location', 'Keyword'], title = title, line_data = line_data)
else:
data = run_query(collection_name, process_name)
if process_name == 'Location':
column_names = ['Location', 'Count']
else:
column_names = ['Language', 'Count']
options = {'pieHole': 0.4, 'sliceVisibilityThreshold': 0.02, 'width': 950, 'height': 500}
type = 'PieChart'
return jsonify({'title': title, 'type': type, 'data': data, 'columnNames': column_names, 'options': options})
# return render_template('twitter/process.html', collection_list = ['practice', 'Mar_01', 'test'], process_list = ['Language', 'Location', 'Keyword'], title = title, data = data)
return render_template('twitter/process.html', collection_list = ['practice', 'Mar_01', 'test'], process_list = ['Language', 'Location', 'Keyword'])
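# Example JSON body the /process POST expects (collection name is one of the values shown on
# the GET page above; analysis is 'Language', 'Location' or 'Keyword'):
#   {"analyticsCollection": "Mar_01", "analysis": "Language"}
# The JSON response is consumed by the client-side Google Charts code as a PieChart or LineChart.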
@bp.route('/mongo', methods=['GET', 'POST'])
# @login_required
def mongo():
"""Mongo operations
Get input from user to run MongoDb operations
"""
if request.method == 'POST':
request_data = json.loads(request.data)
if request_data['process'] == 'import':
directory = request_data['directory']
database_name = request_data['database']
collection_name = request_data['collection']
start_import = threading.Thread(name='Mongo Import', target=mongo_import, args=(directory, database_name, collection_name), daemon=True)
start_import.start()
elif request_data['process'] == 'adder':
database = request.form['database']
collection = request.form['collection']
keywords = request.form['keywords']
subprocess.Popen(['python', 'flaskr/static/mongo/KeywordAdder.py', database, collection, keywords])
return render_template('twitter/mongo.html')
|
executer.py
|
import subprocess
import multiprocessing
import sys
def work(rounds=10):
    for x in range(rounds):
        proc = subprocess.Popen("nosetests -q test_network_advanced_inter_vmconnectivity test_network_basic_adminstateup test_network_basic_dhcp_disable"
            " test_network_basic_dhcp_lease test_network_basic_inter_vmconnectivity test_network_basic_metadata test_network_basic_multisubnet"
            " test_network_basic_vmconnectivity --logging-level=INFO", shell=True, stdout=subprocess.PIPE)
        # Stream the test runner output line by line as it is produced
        for line in proc.stdout:
            print(line.decode(errors="replace"), end="")
            sys.stdout.flush()
if __name__ == '__main__':
    for i in range(10):
        # Pass the callable itself; target=work() would run the tests in the parent and hand None to Process
        p = multiprocessing.Process(target=work)
        p.start()
|
wsdump.py
|
#!D:\puneeth\facemap\Scripts\python.exe
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic number
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except Exception:
                    pass
            elif isinstance(data, bytes):
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except Exception:
                    pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
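# Typical invocations (for instance):
#   python wsdump.py ws://echo.websocket.org/ -t "hello"          # connect and send an initial text frame
#   python wsdump.py wss://example.org/socket -n -v               # ignore invalid certs, print opcodes
#   python wsdump.py ws://localhost:8080/ws --headers "Authorization: Bearer TOKEN,X-Debug: 1"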
|
node.py
|
# coding: utf-8
import json
from time import time
import requests
from .config import config
from .log import get_logger
log = get_logger("CONFIG")
from .utils import TargetRepeatingThread
class Node(object):
status = "unknown"
name = None
host = None
port = None
platforms = None
queue = None
sessions = None
pool = None
can_produce = 0
latency = None
locked = False
def __init__(self, name, host, port):
self.name = name
self.host = host
self.port = port
self.renew_info()
self.status_poller = TargetRepeatingThread(
target=self.reload_info, yield_time=config.get("NODE_POLLING_FREQUENCY"), name="Node_Status_Thread-%s" % self.name)
self.status_poller.start()
log.info("Node started: %s %s %s" % (self.name, self.host, self.port))
def stop(self):
self.status_poller.stop()
self.status_poller.join()
log.info("Node stopped: %s %s %s" % (self.name, self.host, self.port))
def renew_info(self, response=None):
if not response:
content = {}
else:
content = json.loads(response.content)
result = content.get("result", {})
pool = result.get("pool", {})
self.pool = pool.get("pool", {})
self.can_produce = pool.get("can_produce", 0)
self.sessions = result.get("sessions", [])
self.platforms = result.get("platforms", [])
self.queue = result.get("queue", [])
def reload_info(self):
_start = time()
try:
response = requests.get("http://%s:%s/api/status" % (self.host, self.port))
except requests.ConnectionError:
self.status = "offline"
response = None
self.latency = time() - _start
self.renew_info(response)
if response is not None:
if response.status_code != 200:
self.status = "bad"
elif response:
self.status = "online"
@property
def info(self):
return {
"status": self.status,
"name": self.name,
"host": self.host,
"port": self.port,
"platforms": self.platforms,
"sessions": self.sessions,
"queue": self.queue,
"pool": self.pool,
"can_produce": self.can_produce,
"latency": self.latency
}
def to_json(self):
return self.info
def lock(self):
self.locked = True
def unlock(self):
self.locked = False
class Nodes(object):
def __init__(self):
log.info("starting nodes")
self.nodes = []
nodes = config.get("NODES", {})
self._add_nodes(nodes)
self.reloader = TargetRepeatingThread(
target=self.reload, yield_time=config.get("NODES_RELOAD_FREQUENCY"), name="Nodes_Reloader_Thread")
self.reloader.start()
log.info("Nodes started")
def stop(self):
for node in self.nodes:
node.stop()
self.reloader.stop()
self.reloader.join()
log.info("Nodes preloader stopped")
def __iter__(self):
return iter(self.nodes)
def _add_nodes(self, nodes):
self.nodes.extend([Node(**node) for node in nodes])
def _remove_nodes_by_names(self, names):
removable_nodes = [node for node in self.nodes if node.name in names]
for r_node in removable_nodes:
self.nodes.remove(r_node)
def reload(self):
nodes = config.get("NODES", {})
have = set([node.name for node in self.nodes])
need = set([node.get("name") for node in nodes])
to_add = need - have
to_delete = have - need
self._add_nodes([node for node in nodes if node.get("name") in to_add])
self._remove_nodes_by_names(to_delete)
def get_node_by_dc(self, dc):
platform = dc.get("platform")
for node in self.nodes:
if node.locked:
continue
node.lock()
if platform in node.pool.keys():
node.pool[platform] -= 1
return node
if platform in node.platforms and node.can_produce > 0:
node.can_produce -= 1
return node
node.unlock()
@property
def platforms(self):
platforms = []
for node in self.nodes:
platforms += node.platforms
return platforms
def to_json(self):
return self.nodes
nodes = Nodes()
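# Example (assumption, not part of the shipped config): Nodes() expects config["NODES"] to be a
# list of dicts whose keys match Node.__init__(name, host, port), e.g.
#   NODES = [
#       {"name": "node-1", "host": "192.168.0.10", "port": 4444},
#       {"name": "node-2", "host": "192.168.0.11", "port": 4444},
#   ]
# A caller could then reserve a node for a desired capability (the platform string is hypothetical):
#   node = nodes.get_node_by_dc({"platform": "ubuntu-14.04"})
#   if node is not None:
#       ...  # use node.host / node.port, then node.unlock() when finished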
|
InterfaceTest.py
|
#! /usr/local/bin/python3
# -*- coding: UTF-8 -*-
# API comparison test
import urllib
from urllib import request, parse
from urllib.parse import urlparse  # used to parse the parameters after '?' in a URL and return them as a dict
import threading  # multithreading
import requests
import time
import hashlib  # md5 hashing
import json
from json import JSONDecodeError
import logging  # error logging
import xlwt  # Excel workbook creation
from datetime import datetime, timedelta  # time-of-day checks
# import ThreadGetResult  # threaded result collection
www_time_arr = []  # per-request times for the www endpoint
php_time_arr = []  # per-request times for the php endpoint
# www: fetch data
def get_www_data(url, params):
    time_begin = time.time()
    response = requests.post(url, data=params)
    try:
        global www_time_arr
        result_dict = json.loads(response.text)  # convert the str body into a dict
        result_json = json.dumps(result_dict, sort_keys=True, indent=2)  # convert the dict into formatted json
        time_consume = time.time() - time_begin
        print("\nwww json result:", result_json, "\nwww result type:", type(result_json), "request time:", time_consume)
        www_time_arr.append(time_consume)
        print("length of www_time_arr: %d" % len(www_time_arr))
        return (result_json, time_consume)
    except JSONDecodeError as e:
        print("Exception:", e.msg)
# php: fetch data
def get_php_data(url, params):
    time_begin = time.time()
    response = requests.post(url, data=params)
    try:
        global php_time_arr
        result_dict = json.loads(response.text)  # convert the str body into a dict
        result_json = json.dumps(result_dict, sort_keys=True, indent=2)  # convert the dict into formatted json
        time_consume = time.time() - time_begin  # elapsed time
        print("\nphp json result:", result_json, "\nphp result type:", type(result_json), "request time:", time_consume)
        php_time_arr.append(time_consume)
        print("length of php_time_arr: %d" % len(php_time_arr))
        return (result_json, time_consume)
    except JSONDecodeError as e:
        print("Exception:", e.msg)
# Parse the query parameters out of a URL and return them as a dict
def querys_params(url):
    qy = urlparse(url).query
    item = urllib.parse.parse_qs(qy).items()
    return dict([(k, v[0]) for k, v in item])
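# Example (any URL with a query string works):
#   querys_params("https://example.com/api?CmdId=GetTest&UserId=10719")
#   # -> {'CmdId': 'GetTest', 'UserId': '10719'}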
# Wait until the scheduled time of day before firing the requests
def time_judge(time_bid, user_id):
    flag = 1
    while flag:
        if datetime.now().strftime('%H:%M:%S') > time_bid:
            print("Will run tomorrow")
            tomorrow_day = (datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d') + time_bid
            while datetime.now().strftime('%H:%M:%S') >= time_bid or datetime.now().strftime(
                    '%Y-%m-%d %H-%M-%S') < tomorrow_day:
                pass
            break
        else:
            print("Will run today")
            while datetime.now().strftime('%H:%M:%S') < time_bid:
                pass
            # 10 requests per endpoint
            start_request_ten(user_id)
            break
# Set up a cell style
def set_style(name='Arial', height=300, bold=False):
    style = xlwt.XFStyle()  # initialise the style
    font = xlwt.Font()  # create a font for the style
    font.name = name  # 'Times New Roman'
    font.bold = bold
    font.color_index = 4
    font.height = height
    # borders = xlwt.Borders()
    # borders.left = 6
    # borders.right = 6
    # borders.top = 6
    # borders.bottom = 6
    style.font = font
    # style.borders = borders
    return style
# Create the results spreadsheet
def create_Excel(rows, columns, url_array):
    # Create the workbook
    excel = xlwt.Workbook(encoding='utf-8')
    # Create the worksheet
    excel_sheet = excel.add_sheet(u'Sheet1')
    # Set the column widths
    excel_sheet.col(0).width = (100 * 70)
    excel_sheet.col(1).width = (80 * 70)
    excel_sheet.col(2).width = (80 * 70)
    excel_sheet.col(3).width = (100 * 70)
    excel_sheet.col(4).width = (80 * 70)
    row0 = [u'Time', u'PHP time', u'WWW time', u'Time reduction %', u'Endpoint']
    date = datetime.now().strftime('%Y-%m-%d %H.%M.%S')
    # Write the header row
    for i in range(0, len(row0)):
        excel_sheet.write(0, i, row0[i], set_style('Times New Roman', 400, True))
        # print("row 1, column %d:" % i, row0[i])
    # Fill in the data
    for i in range(0, len(rows)):  # rows
        for j in range(0, len(row0)):  # columns
            if j == 0:  # time
                excel_sheet.write(i+1, j, '%s' % date, set_style())
            elif j == 1:  # PHP time
                excel_sheet.write(i+1, j, rows[i], set_style())
            elif j == 2:  # WWW time
                excel_sheet.write(i+1, j, columns[i], set_style())
            elif j == 3:  # relative improvement
                excel_sheet.write(i+1, j, (columns[i] - rows[i]) / columns[i] * 100, set_style())
            else:
                excel_sheet.write(i+1, j, url_array[i], set_style())
        print("\nrow %d:" % (i+1), '%s' % date, rows[i], columns[i], (columns[i] - rows[i]) / columns[i] * 100, url_array[i])
    excel.save('/Users/admin/Desktop/接口测试结果统计/统计结果 %s.xls' % date)
# Start requesting data: 10 requests per endpoint
def start_request_ten(userId):
    php = "https://php.rongtuojinrong.com/rongtuoxinsoc/huishang?"
    www = "https://www.rongtuojinrong.com/hsesb/esb?"
url_array = [ # 0 回款计划 --- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=MonthRecivesCashPlan&UserId=12154&Token=a4cdb2347d4f3ae25d0597e86a4b6204&defidenshuxing=1&platformiOS=iOS&AppTime=1523245813&FlagChnl=1&Month=2018-04&AppId=iOS",
# 1 回款计划-月度查看详情 ---- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=ReciveMoneyMonthDayDetail&platformiOS=iOS&PageNum=1&MonthDay=2018-04&UserId=12154&AppTime=1523245973&FlagChnl=1&defidenshuxing=1&leixing=1&AppId=iOS&Type=1&PageSize=10&Token=f6589271286b9ed2eb7c398f5416caad",
# 2 回款计划 - 某天的详情 ---- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=DayRecivesCashPlan&Token=cff4876ce116f206a166c125a66f0af4&UserId=12154&defidenshuxing=1&platformiOS=iOS&FlagChnl=1&AppTime=1523251115&AppId=iOS&SearchDate=2018-04-02",
# 3 月度总览 ---- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=ReciveMoneyMonthlyOverview&Token=0366fd0f7198372945b24a04d7aa9f7c&UserId=12154&defidenshuxing=1&platformiOS=iOS&PageNum=1&Type=1&PageSize=10&AppTime=1523250892&AppId=iOS",
# 4 绑定银行卡界面 --- 获取推荐银行接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLBankOfCommend&UserId=126&Token=8eb6935e8367e113ee081fc9a62df157&defidenshuxing=1&platformiOS=iOS&CmdId=LLBankOfCommend&FlagChnl=1&AppTime=1523252717&AppId=iOS",
# 5 绑定银行卡界面 --- 判断用户是否开立电子账号
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLQueryIsRegist&UserId=126&Token=02e9e11ae3fb9b890fde08ba3ed5323b&defidenshuxing=1&platformiOS=iOS&CmdId=LLQueryIsRegist&FlagChnl=1&AppTime=1523253022&AppId=iOS",
# 6 绑定银行卡界面 --- 签约支付(绑卡界面签约接口)
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=Signcreatebill&CardNo=1111111111111111111&platformiOS=iOS&CmdId=Signcreatebill&UserId=126&AppTime=1523253022&BindMob=18953166668&FlagChnl=1&defidenshuxing=1&AppId=iOS&Token=02e9e11ae3fb9b890fde08ba3ed5323b&IdNo=371502198801134027&AcctName=周润秋",
# 7 绑定银行卡界面 --- 电子账户开立接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=RegistAcctNo&BindCardNo=1111111111111111111&platformiOS=iOS&UserId=126&AppTime=1523253022&Mobile=18953166668&FlagChnl=1&defidenshuxing=1&AppId=iOS&Token=02e9e11ae3fb9b890fde08ba3ed5323b&IdNo=371502198801134027&SurName=周润秋",
# 8 绑定银行卡界面 --- 需要绑卡绑定电子账户
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=SignCard&SigCard=1111111111111111111&platformiOS=iOS&UserId=126&AppTime=1523253022&Mobile=18953166668&FlagChnl=1&defidenshuxing=1&AppId=iOS&Token=02e9e11ae3fb9b890fde08ba3ed5323b",
# 9 银行卡列表界面 ---- 解卡接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=SignCardCancel&UserId=2513&Token=fa0d06450bafb366ad050429281203c3&defidenshuxing=0&platformiOS=iOS&FlagChnl=1&AppTime=1523253834&AppId=iOS&SigCard=1111111111111111111",
# 10 获取验证码接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=IdentifyCodeSend&UserId=2513&TransType=1&Token=fdd33272a809318e5b8c4a0691c5b68b&defidenshuxing=0&platformiOS=iOS&FlagChnl=1&AppTime=1523254954&AppId=iOS&PhoneNum=15169013960",
# 11 验证验证码接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=IdentifyCodeCheck&UserId=14973&TransType=1&Code=107066&Token=51c5ca5393002fa0b88924ce5dbd68b1&defidenshuxing=1&platformiOS=iOS&CmdId=IdentifyCodeCheck&AppTime=1523255116&AppId=iOS&PhoneNum=15169013960",
# 12 申请更换手机号界面(获取用户状态的接口,用户之前可能也申请过修改手机号
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=UserInfoReviewStatus&UserId=14973&Token=dc455796e54a783a9e64ed9c363d51bd&defidenshuxing=1&platformiOS=iOS&CmdId=UserInfoReviewStatus&AppTime=1523255223&AppId=iOS",
# 13 获取我的银行卡列表数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLQueryCard&UserId=10719&Token=4eac92a9b31420484683cca14fb9c5c8&defidenshuxing=1&platformiOS=iOS&AppTime=1523250517&FlagChnl=1&AppId=iOS",
# 14 充值页面 获取充值数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiLLQueryEBankAcct&UserId=10719&Token=a8866e02cae1a696673a361adc746a6e&defidenshuxing=1&platformiOS=iOS&CmdId=BendiLLQueryEBankAcct&AppTime=1523251579&FlagChnl=1&AppId=iOS",
# 15 充值 -- 支付创单
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLNetSave&UserId=10719&Token=4aed492d25da2ad64510d9c85d33d822&defidenshuxing=1&TransAmt=1000.00&platformiOS=iOS&CmdId=LLNetSave&AppTime=1523251666&FlagChnl=1&AppId=iOS",
# 16 我的首页—融托投资账户金额数据请求
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetUserInfo&Token=8339b8b53bc6cc2affd832b271831be6&UserId=11115&AppId=iOS&platformiOS=iOS&AppTime=1523245064&defidenshuxing=1",
# 17 是否设置交易密码
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=QueryAcctPasswordIsSet&Token=42f36390327b8537162de0d64a39c95c&UserId=194&AppId=iOS&platformiOS=iOS&AppTime=1523245632&defidenshuxing=1&FlagChnl=1",
# 18 提现列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiLLCashQuery&UserId=11115&Token=0685013cd73f9710bb7cc10c1e550031&defidenshuxing=1&platformiOS=iOS&AppTime=1523252179&FlagChnl=1&AppId=iOS",
# 19 提现开户行省份
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLGetProvince&UserId=194&Token=1d92f641df807f05af10a2cf809d9343&defidenshuxing=1&platformiOS=iOS&AppTime=1523252887&FlagChnl=1&AppId=iOS",
# 20 提现开户行所在市
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLGetCity&UserId=194&defidenshuxing=1&Token=03f08ed2c11467e3f8f35f2b6596cd2e&Code=34&platformiOS=iOS&AppTime=1523252936&FlagChnl=1&AppId=iOS",
# 21 提现开户行列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=Prcptcdquery&CardNo=6222081602005990337&UserId=194&defidenshuxing=1&Token=e3b2e9d092aaa115e1c1d08564202a43&platformiOS=iOS&CityCode=150500&AppTime=1523253017&FlagChnl=1&AppId=iOS",
# 22 融托账户—我的债权(//项目状态jkzhuangtai(0:全部,6:未到期,8:已到期,10:冻结中))
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetMyZhaiQuan&UserId=12154&page_size=10&Token=48ee78f4cd49e230ebd10e69d6d2753d&defidenshuxing=1&platformiOS=iOS&txlx=0&jkzhuangtai=0&AppTime=1523253425&xmlx=0&page=1&AppId=iOS",
# 23 账单-请求有月份数据的接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetDealMonthList&Token=47df4980cf8ed9602d933ba741f2e86f&UserId=12154&defidenshuxing=1&platformiOS=iOS&PageNum=1&AppTime=1523254322&DealTrench=0,1,2,3,4,5&AppId=iOS",
# 24 账单-请求选中单独月份内的数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetDealDetailList&UserId=12154&Token=19ad4bd5944a6ba99d17ac28004ac1e8&defidenshuxing=1&platformiOS=iOS&TransMonth=2018-04&AppTime=1523254323&DealTrench=0,1,2,3,4,5&AppId=iOS",
# 25 月账单
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetMonthDealStats&Token=fad38dfd71ef1eca604fd63563dd48e1&UserId=12154&defidenshuxing=1&platformiOS=iOS&AppTime=1523254748&SearchMonth=2017-12&AppId=iOS",
# 26 交易密码设置-验证验证码
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=IdentifyCodeCheck&AppId=iOS&UserId=11115&AppTime=1523255371&Token=715b83536a71aa461ac9050306318943&FlagChnl=1&TransType=2&PhoneNum=13520227421&Code=973816&platformiOS=iOS&defidenshuxing=1",
# 27 项目信息页 数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetProjectInfo&Token=1c4aff423a149fb42c54d5cf22b070a0&UserId=9166&jie_id=3116&defidenshuxing=1&platformiOS=iOS&AppTime=1523254471&AppId=iOS",
# 28 项目列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetAllProjectList&UserId=9166&pro_status=0&Token=c7a0d0516357288d37342abf9d037485&defidenshuxing=1&platformiOS=iOS&CmdId=GetAllProjectList&AppTime=1523254929&page=1&AppId=iOS&page_size=6",
# 29 首页-悬浮米袋
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLPurseDisplay&Token=df987f0c7c37eb9dac3c79e59ac6b1a1&UserId=9166&AppId=iOS&platformiOS=iOS&AppTime=1523255010&defidenshuxing=1",
# 30 确认投资-可用余额
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=mybalannumbendi&Token=22f6c196da8b58922c444ae76ad90db2&UserId=578&AppId=iOS&platformiOS=iOS&AppTime=1523257743&defidenshuxing=1",
# 31 确认投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLBidApply&AppTime=1523258224&AppId=huiyuan&UserId=578&Token=b889db0da990b9f04d8df1c5500438b9&ProjId=1587&TransAmt=100.00&RedPacket=&InterestCoupon=",
# 32 我的邀请(邀请过好友的)
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetInviterStats&Token=afb9ac2f34f9a7a2270802f2ae071d08&UserId=12154&AppId=iOS&platformiOS=iOS&AppTime=1523245389&defidenshuxing=1",
# 33 奖励明细
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetInviterAwardList&AwardType=0&StartTime=1388505600&UserId=12154&Token=77f67545004d49852a15b4a210ae0e31&defidenshuxing=1&PageNum=1&platformiOS=iOS&AppTime=1523245556&EndTime=1523203200&AppId=iOS",
# 34 我的卡券 -- 已使用和过期体验金
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetExperienceCoupon&Token=976ffd396a1e70215b13a642d9aebd2d&UserId=12154&Status=1,2&defidenshuxing=1&platformiOS=iOS&PageNum=1&AppTime=1523251394&AppId=iOS",
# 35 绑定银行卡界面 ---- 解卡后绑卡接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BindCard&SigCard=1111111111111111111&platformiOS=iOS&UserId=126&AppTime=1523253022&Mobile=18953166668&FlagChnl=1&defidenshuxing=1&AppId=iOS&Token=02e9e11ae3fb9b890fde08ba3ed5323b",
# 36 风险评估问卷
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetTest&Token=215f3496285fa4eeead4b87e87b7b23a&UserId=10719&defidenshuxing=1&platformiOS=iOS&TestCode=RiskAssessmentQuestionnaire&AppTime=1523250706&AppId=iOS",
# 37 我的首页—获取用户评级(去评估)
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetUserGradeInfo&UserId=11115&Token=8339b8b53bc6cc2affd832b271831be6&defidenshuxing=1&platformiOS=iOS&CmdId=GetUserGradeInfo&AppTime=1523245064&AppId=iOS",
# 38 融托投资账户单独金额数据请求
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetUserInfodandu&Token=33b9334d8775b481f7e93b5c570b7198&UserId=194&AppId=iOS&platformiOS=iOS&AppTime=1523251036&defidenshuxing=1",
# 39 项目信息页用于请求num字段判断加息行是否显示
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetInterestCouponNum&Token=f83c0ec6eebd175178108b7e54aad5ad&UserId=9166&ProjId=3116&defidenshuxing=1&platformiOS=iOS&AppTime=1523254436&AppId=iOS",
# 40 项目-下拉列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=XingMuListCategory&UserId=9166&Token=f8d2fefcbe3127c6ad5b7714398f389c&defidenshuxing=1&platformiOS=iOS&CmdId=XingMuListCategory&AppTime=1523255004&AppId=iOS"
]
now_time = round(time.time())
    www_average_time_arr = []  # average request times for the www endpoint
    php_average_time_arr = []  # average request times for the php endpoint
    ts_www = []
    ts_php = []
    for i in range(len(url_array)):
        params = querys_params(url_array[i])
        params['AppTime'] = now_time
        user_id = params['UserId']
        if len(userId) != 0:
            user_id = userId
            params['UserId'] = user_id
        token_str = "AppId=iOS&UserId=%s&AppTime=%d" % (user_id, now_time)
        hash = hashlib.md5()
        hash.update(token_str.encode('utf-8'))
        token_md5 = hash.hexdigest()
        params['Token'] = token_md5
        print("Token:", token_md5, "user_id:", user_id, "token_str:", token_str)
        print("\nparameter dict #%d:" % i, params)
        for j in range(0, 10):
            thread_www = threading.Thread(target=get_www_data, args=(www, params))
            thread_www.start()
            print("www active thread count: %d" % threading.active_count(), "current thread:", threading.current_thread())
            ts_www.append(thread_www)
            thread_php = threading.Thread(target=get_php_data, args=(php, params))
            thread_php.start()
            print("php active thread count: %d" % threading.active_count(), "current thread:", threading.current_thread())
            ts_php.append(thread_php)
    for m in range(0, len(ts_php)):
        ts_www[m].join()
        ts_php[m].join()
    www_total = 0.0  # total time over 10 www requests
    q = 0
    k = 9
    for q in range(q, len(www_time_arr)):
        www_total += www_time_arr[q]
        if q == k:
            www_average = www_total / 10.0  # average over 10 www requests
            www_average_time_arr.append(www_average)
            q = k
            k = k + 10
            www_total = 0.0
    print("www value of q: %d" % q, "\nwww_average_time_arr:", www_average_time_arr, "\nvalue of k: %d" % k)
    php_total = 0.0  # total time over 10 php requests
    r = 0
    h = 9
    for r in range(r, len(php_time_arr)):
        php_total += php_time_arr[r]
        if r == h:
            php_average = php_total / 10.0  # average over 10 php requests
            php_average_time_arr.append(php_average)
            r = h
            h = h + 10
            php_total = 0.0
    print("php value of r: %d" % r, "\nphp_average_time_arr:", php_average_time_arr, "\nvalue of h: %d" % h)
    # print("php total time: %d" % php_total, "www total time: %d" % www_total)
    print("www request times:", www_time_arr, "\nwww array length: %d" % len(www_time_arr))
    print("php request times:", php_time_arr, "\nphp array length: %d" % len(php_time_arr))
    print("www average request times:", www_average_time_arr, "\nwww array length: %d" % len(www_average_time_arr))
    print("php average request times:", php_average_time_arr, "\nphp array length: %d" % len(php_average_time_arr))
    print("\ntotal run time:", time.time() - now_time)
    # Create the spreadsheet
    create_Excel(php_average_time_arr, www_average_time_arr, url_array)
# Start requesting data: one pass per endpoint, comparing the two hosts
def start_request_once():
    php = "https://php.rongtuojinrong.com/rongtuoxinsoc/huishang?"
    www = "https://www.rongtuojinrong.com/hsesb/esb?"
url_array = [ # 0 回款计划 --- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=MonthRecivesCashPlan&UserId=12154&Token=a4cdb2347d4f3ae25d0597e86a4b6204&defidenshuxing=1&platformiOS=iOS&AppTime=1523245813&FlagChnl=1&Month=2018-04&AppId=iOS",
# 1 回款计划-月度查看详情 ---- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=ReciveMoneyMonthDayDetail&platformiOS=iOS&PageNum=1&MonthDay=2018-04&UserId=12154&AppTime=1523245973&FlagChnl=1&defidenshuxing=1&leixing=1&AppId=iOS&Type=1&PageSize=10&Token=f6589271286b9ed2eb7c398f5416caad",
# 2 回款计划 - 某天的详情 ---- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=DayRecivesCashPlan&Token=cff4876ce116f206a166c125a66f0af4&UserId=12154&defidenshuxing=1&platformiOS=iOS&FlagChnl=1&AppTime=1523251115&AppId=iOS&SearchDate=2018-04-02",
# 3 月度总览 ---- 投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=ReciveMoneyMonthlyOverview&Token=0366fd0f7198372945b24a04d7aa9f7c&UserId=12154&defidenshuxing=1&platformiOS=iOS&PageNum=1&Type=1&PageSize=10&AppTime=1523250892&AppId=iOS",
# 4 绑定银行卡界面 --- 获取推荐银行接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLBankOfCommend&UserId=126&Token=8eb6935e8367e113ee081fc9a62df157&defidenshuxing=1&platformiOS=iOS&CmdId=LLBankOfCommend&FlagChnl=1&AppTime=1523252717&AppId=iOS",
# 5 绑定银行卡界面 --- 判断用户是否开立电子账号
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLQueryIsRegist&UserId=126&Token=02e9e11ae3fb9b890fde08ba3ed5323b&defidenshuxing=1&platformiOS=iOS&CmdId=LLQueryIsRegist&FlagChnl=1&AppTime=1523253022&AppId=iOS",
# 6 绑定银行卡界面 --- 签约支付(绑卡界面签约接口)
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=Signcreatebill&CardNo=1111111111111111111&platformiOS=iOS&CmdId=Signcreatebill&UserId=126&AppTime=1523253022&BindMob=18953166668&FlagChnl=1&defidenshuxing=1&AppId=iOS&Token=02e9e11ae3fb9b890fde08ba3ed5323b&IdNo=371502198801134027&AcctName=周润秋",
# 7 绑定银行卡界面 --- 需要绑卡绑定电子账户
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=SignCard&SigCard=1111111111111111111&platformiOS=iOS&UserId=126&AppTime=1523253022&Mobile=18953166668&FlagChnl=1&defidenshuxing=1&AppId=iOS&Token=02e9e11ae3fb9b890fde08ba3ed5323b",
# 8 银行卡列表界面 ---- 解卡接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=SignCardCancel&UserId=2513&Token=fa0d06450bafb366ad050429281203c3&defidenshuxing=0&platformiOS=iOS&FlagChnl=1&AppTime=1523253834&AppId=iOS&SigCard=1111111111111111111",
# # 9 获取验证码接口
# "https://www.rongtuojinrong.com/hsesb/esb?CmdId=IdentifyCodeSend&UserId=2513&TransType=1&Token=fdd33272a809318e5b8c4a0691c5b68b&defidenshuxing=0&platformiOS=iOS&FlagChnl=1&AppTime=1523254954&AppId=iOS&PhoneNum=15169013960",
# 10 验证验证码接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=IdentifyCodeCheck&UserId=14973&TransType=1&Code=107066&Token=51c5ca5393002fa0b88924ce5dbd68b1&defidenshuxing=1&platformiOS=iOS&CmdId=IdentifyCodeCheck&AppTime=1523255116&AppId=iOS&PhoneNum=15169013960",
# 11 申请更换手机号界面(获取用户状态的接口,用户之前可能也申请过修改手机号
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=UserInfoReviewStatus&UserId=14973&Token=dc455796e54a783a9e64ed9c363d51bd&defidenshuxing=1&platformiOS=iOS&CmdId=UserInfoReviewStatus&AppTime=1523255223&AppId=iOS",
# 12 获取我的银行卡列表数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLQueryCard&UserId=10719&Token=4eac92a9b31420484683cca14fb9c5c8&defidenshuxing=1&platformiOS=iOS&AppTime=1523250517&FlagChnl=1&AppId=iOS",
# # 13 获取用户信息
# "https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiLLQueryEBankAcct&UserId=10719&Token=54a1f22a5800ba323bac7905bf307c6a&defidenshuxing=1&platformiOS=iOS&AppTime=1523250558&FlagChnl=1&AppId=iOS",
# 14 充值页面 获取充值数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiLLQueryEBankAcct&UserId=10719&Token=a8866e02cae1a696673a361adc746a6e&defidenshuxing=1&platformiOS=iOS&AppTime=1523251579&FlagChnl=1&AppId=iOS",
# 15 充值 -- 支付创单
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLNetSave&UserId=10719&Token=4aed492d25da2ad64510d9c85d33d822&defidenshuxing=1&TransAmt=1000.00&platformiOS=iOS&CmdId=LLNetSave&AppTime=1523251666&FlagChnl=1&AppId=iOS",
# 16 我的首页—融托投资账户金额数据请求
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetUserInfo&Token=8339b8b53bc6cc2affd832b271831be6&UserId=11115&AppId=iOS&platformiOS=iOS&AppTime=1523245064&defidenshuxing=1",
# 17 是否设置交易密码
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=QueryAcctPasswordIsSet&Token=42f36390327b8537162de0d64a39c95c&UserId=194&AppId=iOS&platformiOS=iOS&AppTime=1523245632&defidenshuxing=1&FlagChnl=1",
# 18 交易密码设置-徽商web页面
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLPassWordSet&Token=42f36390327b8537162de0d64a39c95c&UserId=194&AppId=iOS&platformiOS=iOS&AppTime=1523245632&defidenshuxing=1&FlagChnl=1",
# 19 提现列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiLLCashQuery&UserId=11115&Token=0685013cd73f9710bb7cc10c1e550031&defidenshuxing=1&platformiOS=iOS&AppTime=1523252179&FlagChnl=1&AppId=iOS",
# # 20 取现提交-web页面
# "https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLCash&AppId=iOS&UserId=10877&AppTime=1523252546&Token=ada8e3adceb662553f48534ad74f2642&FlagChnl=1&TransAmt=123.00&TransType=1&BankCnaps=105471000030&BrabankName=中国建设银行股份有限公司聊城振兴路支行&defidenshuxing=2",
# 21 提现开户行省份
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLGetProvince&UserId=194&Token=1d92f641df807f05af10a2cf809d9343&defidenshuxing=1&platformiOS=iOS&AppTime=1523252887&FlagChnl=1&AppId=iOS",
# 22 提现开户行所在市
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLGetCity&UserId=194&defidenshuxing=1&Token=03f08ed2c11467e3f8f35f2b6596cd2e&Code=34&platformiOS=iOS&AppTime=1523252936&FlagChnl=1&AppId=iOS",
# 23 提现开户行列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=Prcptcdquery&CardNo=6222081602005990337&UserId=194&defidenshuxing=1&Token=e3b2e9d092aaa115e1c1d08564202a43&platformiOS=iOS&CityCode=150500&AppTime=1523253017&FlagChnl=1&AppId=iOS",
# 24 融托账户—我的债权(//项目状态jkzhuangtai(0:全部,6:未到期,8:已到期,10:冻结中))
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetMyZhaiQuan&UserId=12154&page_size=10&Token=48ee78f4cd49e230ebd10e69d6d2753d&defidenshuxing=1&platformiOS=iOS&txlx=0&jkzhuangtai=0&AppTime=1523253425&xmlx=0&page=1&AppId=iOS",
# 25 账单-请求有月份数据的接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetDealMonthList&Token=47df4980cf8ed9602d933ba741f2e86f&UserId=12154&defidenshuxing=1&platformiOS=iOS&PageNum=1&AppTime=1523254322&DealTrench=0,1,2,3,4,5&AppId=iOS",
# 26 账单-请求选中单独月份内的数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetDealDetailList&UserId=12154&Token=19ad4bd5944a6ba99d17ac28004ac1e8&defidenshuxing=1&platformiOS=iOS&TransMonth=2018-04&AppTime=1523254323&DealTrench=0,1,2,3,4,5&AppId=iOS",
# 27 月账单
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetMonthDealStats&Token=fad38dfd71ef1eca604fd63563dd48e1&UserId=12154&defidenshuxing=1&platformiOS=iOS&AppTime=1523254748&SearchMonth=2017-12&AppId=iOS",
# 28 交易密码设置-获取验证码
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=IdentifyCodeSend&AppId=iOS&UserId=11115&AppTime=1523255225&Token=e8f91db4be666e8d2d4a8458facc143b&FlagChnl=1&TransType=2&PhoneNum=13520227421&platformiOS=iOS&defidenshuxing=1",
# # 29 交易密码设置-验证验证码
# "https://www.rongtuojinrong.com/hsesb/esb?CmdId=IdentifyCodeCheck&AppId=iOS&UserId=11115&AppTime=1523255371&Token=715b83536a71aa461ac9050306318943&FlagChnl=1&TransType=2&PhoneNum=13520227421&Code=973816&platformiOS=iOS&defidenshuxing=1",
# 30 项目信息页 数据
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetProjectInfo&Token=1c4aff423a149fb42c54d5cf22b070a0&UserId=9166&jie_id=3116&defidenshuxing=1&platformiOS=iOS&AppTime=1523254471&AppId=iOS",
# 31 项目列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetAllProjectList&UserId=9166&pro_status=0&Token=c7a0d0516357288d37342abf9d037485&defidenshuxing=1&platformiOS=iOS&CmdId=GetAllProjectList&AppTime=1523254929&page=1&AppId=iOS&page_size=6",
# 32 首页-悬浮米袋
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLPurseDisplay&Token=df987f0c7c37eb9dac3c79e59ac6b1a1&UserId=9166&AppId=iOS&platformiOS=iOS&AppTime=1523255010&defidenshuxing=1",
# 33 用户开户成功后判断用户是否设置交易密码
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=QueryAcctPasswordIsSet&FlagChnl=1&UserId=9166&defidenshuxing=1&Token=f8d2fefcbe3127c6ad5b7714398f389c&AppTime=1523255004&AppId=iOS",
# 34 确认投资-可用余额
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=mybalannumbendi&Token=22f6c196da8b58922c444ae76ad90db2&UserId=578&AppId=iOS&platformiOS=iOS&AppTime=1523257743&defidenshuxing=1",
# 35 确认投资
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=LLBidApply&AppTime=1523258224&AppId=huiyuan&UserId=578&Token=b889db0da990b9f04d8df1c5500438b9&ProjId=1587&TransAmt=100.00&RedPacket=&InterestCoupon=",
# 36 我的邀请(邀请过好友的)
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetInviterStats&Token=afb9ac2f34f9a7a2270802f2ae071d08&UserId=12154&AppId=iOS&platformiOS=iOS&AppTime=1523245389&defidenshuxing=1",
# 37 奖励明细
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetInviterAwardList&AwardType=0&StartTime=1388505600&UserId=12154&Token=77f67545004d49852a15b4a210ae0e31&defidenshuxing=1&PageNum=1&platformiOS=iOS&AppTime=1523245556&EndTime=1523203200&AppId=iOS",
# 38 我的卡券 -- 已使用和过期体验金
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetExperienceCoupon&Token=976ffd396a1e70215b13a642d9aebd2d&UserId=12154&Status=1,2&defidenshuxing=1&platformiOS=iOS&PageNum=1&AppTime=1523251394&AppId=iOS",
# 39 绑定银行卡界面 ---- 解卡后绑卡接口
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BindCard&SigCard=1111111111111111111&platformiOS=iOS&UserId=126&AppTime=1523253022&Mobile=18953166668&FlagChnl=1&defidenshuxing=1&AppId=iOS&Token=02e9e11ae3fb9b890fde08ba3ed5323b",
# 40 风险评估问卷
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetTest&Token=215f3496285fa4eeead4b87e87b7b23a&UserId=10719&defidenshuxing=1&platformiOS=iOS&TestCode=RiskAssessmentQuestionnaire&AppTime=1523250706&AppId=iOS",
# 41 我的首页—获取用户评级(去评估)
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetUserGradeInfo&UserId=11115&Token=8339b8b53bc6cc2affd832b271831be6&defidenshuxing=1&platformiOS=iOS&CmdId=GetUserGradeInfo&AppTime=1523245064&AppId=iOS",
# 42 融托投资账户单独金额数据请求
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=BendiGetUserInfodandu&Token=33b9334d8775b481f7e93b5c570b7198&UserId=194&AppId=iOS&platformiOS=iOS&AppTime=1523251036&defidenshuxing=1",
# 43 项目信息页用于请求num字段判断加息行是否显示
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=GetInterestCouponNum&Token=f83c0ec6eebd175178108b7e54aad5ad&UserId=9166&ProjId=3116&defidenshuxing=1&platformiOS=iOS&AppTime=1523254436&AppId=iOS",
# 44 项目-下拉列表
"https://www.rongtuojinrong.com/hsesb/esb?CmdId=XingMuListCategory&UserId=9166&Token=f8d2fefcbe3127c6ad5b7714398f389c&defidenshuxing=1&platformiOS=iOS&CmdId=XingMuListCategory&AppTime=1523255004&AppId=iOS"
]
    now_time = round(time.time())
    url_error_arr = []  # endpoints whose responses differ between the two hosts
    for i in range(len(url_array)):
        params = querys_params(url_array[i])
        params['AppTime'] = now_time
        user_id = params['UserId']
        token_str = "AppId=iOS&UserId=%s&AppTime=%d" % (user_id, now_time)
        hash = hashlib.md5()
        hash.update(token_str.encode('utf-8'))
        token_md5 = hash.hexdigest()
        params['Token'] = token_md5
        print("\nparameter dict #%d:" % i, params)
        for j in range(0, 10):
            result_www = get_www_data(www, params)
            result_php = get_php_data(php, params)
            if result_www[0] == result_php[0]:
                print("Results match, endpoint OK")
            else:
                print("Results differ, endpoint mismatch")
                url_error_arr.append(url_array[i])
    print("%d endpoints returned mismatched results:\n" % len(url_error_arr), url_error_arr)
if __name__ == '__main__':
    # time_bid = input("Enter the test time in the format 14:00:00, then press enter:")
    user_id = input("Enter a user_id (or leave it empty), then press enter:")
    # time_judge(time_bid, user_id)
    start_request_ten(user_id)  # 10 requests per endpoint
    # start_request_once()  # one pass per endpoint
|
custom_player.py
|
from ffpyplayer.player import MediaPlayer
import queue
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import threading
import numpy as np
import io
import time
import cv2
import datetime
# https://currentmillis.com/
q = queue.Queue()
#wins3 = "rtmp://143.248.55.86/live/wins3"
wins3 = "rtmp://143.248.55.86:1913/live/wins3"
wins2 = "rtmp://143.248.55.86:1911/live/wins2"
wins = "rtmp://143.248.55.86:31935/live/wins"
local_addr = "rtmp://127.0.0.1/live/wins"
def get_frame():
player = MediaPlayer(wins3, ff_opts={'out_fmt': 'rgb24'})
while 1:
frame, val = player.get_frame()
if val == 'eof':
break
elif frame is None:
time.sleep(0.002)
else:
q.put(frame)
def play():
flag = True
bef_t = 0
bef_show = datetime.datetime.now()
frame_jitter = 33.333333333333333333
while 1:
if q.empty():
time.sleep(0.001)
else:
frame = q.get()
img, t = frame
w, h = img.get_size()
nparray = np.frombuffer(img.to_bytearray()[0], dtype=np.uint8)
nparray = nparray.reshape(h, w, 3)
nparray = cv2.cvtColor(nparray, cv2.COLOR_RGB2BGR)
while datetime.datetime.now() - bef_show <= datetime.timedelta(milliseconds=frame_jitter):
time.sleep(0.001)
if datetime.datetime.now() - bef_show >= datetime.timedelta(milliseconds=100) or True:
print(datetime.datetime.now().strftime("%H:%M:%S.%f"), int(q.qsize() * 33.333333333), (datetime.datetime.now() - bef_show).microseconds//1000, t)
bef_show = datetime.datetime.now()
cv2.imshow("test", nparray)
            # delay = 1 if flag else 30
            key = cv2.waitKey(1) & 0xFF
            if key == 27:
                break
            if bef_t and t - bef_t >= 0.05:  # flag a jump in presentation timestamps (frames likely dropped)
                print("skip")
            bef_t = t
            if key == ord('t'):
                frame_jitter = 1 if flag else 33
                print("toggle to " + str(frame_jitter))
                flag = not flag
t = threading.Thread(target=get_frame)
t.start()
t = threading.Thread(target=play)
t.start()
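# Note: both threads are non-daemon, so the process keeps running after play() breaks on ESC
# because get_frame() still blocks on the stream. A cleaner shutdown (sketch, not part of the
# original) would share a stop flag checked by both loops:
#   stop_event = threading.Event()
#   # inside get_frame()/play(): if stop_event.is_set(): break
#   # when ESC is pressed:       stop_event.set()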
|
test_dag_serialization.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import multiprocessing
import unittest
from datetime import datetime, timedelta
from unittest import mock
from dateutil.relativedelta import FR, relativedelta
from parameterized import parameterized
from airflow import example_dags
from airflow.contrib import example_dags as contrib_example_dags
from airflow.gcp import example_dags as gcp_example_dags
from airflow.hooks.base_hook import BaseHook
from airflow.models import DAG, BaseOperator, Connection, DagBag
from airflow.operators.bash_operator import BashOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.serialization import SerializedBaseOperator, SerializedDAG
from airflow.utils.tests import CustomBaseOperator, GoogleLink
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {
"__type": "timedelta",
"__var": 300.0
}
}
},
"start_date": 1564617600.0,
"params": {},
"_dag_id": "simple_dag",
"fileloc": None,
"tasks": [
{
"task_id": "simple_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": {
"auto": False, "task_ids": [], "datasets": []
},
"_outlets": {"datasets": []},
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": [],
"_task_type": "BaseOperator",
"_task_module": "airflow.models.baseoperator",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"_downstream_task_ids": [],
"_inlets": {
"auto": False, "task_ids": [], "datasets": []
},
"_outlets": {"datasets": []},
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": [],
"_task_type": "CustomBaseOperator",
"_task_module": "airflow.utils.tests",
},
],
"timezone": "UTC",
},
}
def make_example_dags(module):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module.__path__[0])
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
dag = DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"depends_on_past": False,
},
start_date=datetime(2019, 8, 1),
)
BaseOperator(task_id='simple_task', dag=dag, owner='airflow')
CustomBaseOperator(task_id='custom_task', dag=dag)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
""" Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {
'start_date': datetime(2019, 7, 10)
}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={
'hello': lambda name: 'Hello %s' % name
},
catchup=False
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags():
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
dags.update(make_example_dags(example_dags))
dags.update(make_example_dags(contrib_example_dags))
dags.update(make_example_dags(gcp_example_dags))
return dags
def serialize_subprocess(queue):
"""Validate pickle in a subprocess."""
dags = collect_dags()
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=('{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}')))
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(
serialized_dags['simple_dag'],
serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(
json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list in the serialised dag python dictionary
This is needed as the order of tasks should not matter but assertEqual
would fail if the order of tasks list changes in dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"],
key=lambda x: sorted(x.keys()))
return dag_dict
self.assertEqual(sorted_serialized_dag(ground_truth_dag),
sorted_serialized_dag(json_dag))
def test_deserialization(self):
"""A serialized DAG can be deserialized in another process."""
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=serialize_subprocess, args=(queue,))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags()
self.assertTrue(set(stringified_dags.keys()) == set(dags.keys()))
# Verify deserialized DAGs.
example_skip_dag = stringified_dags['example_skip_dag']
skip_operator_1_task = example_skip_dag.task_dict['skip_operator_1']
self.validate_deserialized_task(
skip_operator_1_task, 'DummySkipOperator', '#e8b7e4', '#000')
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
self.assertTrue(hasattr(example_skip_dag, 'full_filepath'))
self.assertEqual(example_skip_dag.full_filepath, example_skip_dag.fileloc)
example_subdag_operator = stringified_dags['example_subdag_operator']
section_1_task = example_subdag_operator.task_dict['section-1']
self.validate_deserialized_task(
section_1_task,
SubDagOperator.__name__,
SubDagOperator.ui_color,
SubDagOperator.ui_fgcolor
)
simple_dag = stringified_dags['simple_dag']
custom_task = simple_dag.task_dict['custom_task']
self.validate_operator_extra_links(custom_task)
def validate_deserialized_task(self, task, task_type, ui_color, ui_fgcolor):
"""Verify non-airflow operators are casted to BaseOperator."""
self.assertTrue(isinstance(task, SerializedBaseOperator))
# Verify the original operator class is recorded for UI.
self.assertTrue(task.task_type == task_type)
self.assertTrue(task.ui_color == ui_color)
self.assertTrue(task.ui_fgcolor == ui_fgcolor)
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
self.assertIsNotNone(task.subdag)
self.assertTrue(isinstance(task.subdag, DAG))
else:
self.assertIsNone(task.subdag)
def validate_operator_extra_links(self, task):
"""
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
self.assertEqual(
task.operator_extra_link_dict[GoogleLink.name].get_link(
task, datetime(2019, 8, 1)),
"https://www.google.com"
)
@parameterized.expand([
(None, None),
("@weekly", "@weekly"),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1)),
])
def test_deserialization_schedule_interval(self, serialized_schedule_interval, expected):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"params": {},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected)
@parameterized.expand([
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}})
])
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
if __name__ == '__main__':
unittest.main()
|
colorSegmentationTSR.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#%%
import cv2
import time
import numpy as np
import math
import glob
import pickle
import os
from collections import OrderedDict
from scipy.spatial import distance as dist
from skimage import feature
#import colorcorrect.algorithm as cca
#import matplotlib.pyplot as plt
#from scipy.ndimage.filters import gaussian_filter
#from skimage import img_as_float
from threading import Thread
from imutils.video import FPS
#%% Camara class definition
class liveStreaming:
def __init__(self, backSource, widthRes=640, heightRes=480, framerate=15):
# Camera parameters
self.streaming = cv2.VideoCapture(backSource)
self.streaming.set(cv2.CAP_PROP_FPS, framerate)
self.streaming.set(cv2.CAP_PROP_FRAME_WIDTH, widthRes)
self.streaming.set(cv2.CAP_PROP_FRAME_HEIGHT, heightRes)
self.streaming.set(cv2.CAP_PROP_FOURCC, 6)
self.keepReading = True
pass
def startThread(self):
# https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
Thread(target=self.updateStream, args=()).start()
return self
def updateStream(self):
while self.keepReading:
(self.acquired, self.currentFrame) = self.streaming.read()
self.streaming.release()
def readFrame(self):
return self.currentFrame
def stopRecording(self):
self.keepReading = False
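# Minimal usage sketch (device index 0 is an assumption; any cv2.VideoCapture source works):
#   camera = liveStreaming(0, widthRes=640, heightRes=480, framerate=15).startThread()
#   time.sleep(0.5)                     # give the reader thread a moment to grab a first frame
#   frame = camera.readFrame()          # latest frame as a BGR numpy array
#   camera.stopRecording()              # ends the thread and releases the capture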
class colorEnhancing:
def __init__(self, gamma = 1.3):
self.gammaTable = np.array([((i / 255.0) ** (1.0 / gamma)) * 255
for i in np.arange(0, 256)]).astype("uint8")
self.clahe = cv2.createCLAHE(2, (3,3))
self.clahe.setClipLimit(2)
pass
def adjust_gamma(self, image):
# https://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/
return cv2.LUT(image, self.gammaTable)
def balanceChannelRange(self, channel, perc = 5):
mi, ma = (np.percentile(channel, perc), np.percentile(channel,100.0-perc))
channel = np.clip((channel-mi)*255.0/(ma-mi), 0, 255).astype(np.uint8)
return channel
def apply_mask(self,matrix, mask, fill_value):
masked = np.ma.array(matrix, mask=mask, fill_value=fill_value)
return masked.filled()
def apply_threshold(self,matrix, low_value, high_value):
low_mask = matrix < low_value
matrix = self.apply_mask(matrix, low_mask, low_value)
high_mask = matrix > high_value
matrix = self.apply_mask(matrix, high_mask, high_value)
return matrix
def simplest_cb(self,img, half_percent = 0.025):
#https://gist.github.com/DavidYKay/9dad6c4ab0d8d7dbf3dc
#https://web.stanford.edu/~sujason/ColorBalancing/simplestcb.html
out_channels = []
for colors in range(3):
channel = img[:,:,colors]
flat = np.sort(channel.reshape(channel.size))
low_val = flat[math.floor(flat.shape[0] * half_percent)]
high_val = flat[math.ceil(flat.shape[0] * (1.0 - half_percent))]
thresholded = self.apply_threshold(channel, low_val, high_val)
out_channels.append(cv2.normalize(thresholded, None, 0, 255, cv2.NORM_MINMAX))
return cv2.merge(out_channels)
def grayWorldBalance(self,frame):
balancedImage = cv2.cvtColor(frame, cv2.COLOR_RGB2LAB)
correctionFactors = np.subtract(cv2.mean(balancedImage[:,:,1:3])[0:2], 128)/ 280.5
balancedImage[:,:,1] = balancedImage[:,:,1] - balancedImage[:,:,0]*correctionFactors[0]
balancedImage[:,:,2] = balancedImage[:,:,2] - balancedImage[:,:,0]*correctionFactors[1]
balancedImage = cv2.cvtColor(balancedImage, cv2.COLOR_LAB2RGB)
return balancedImage
def equalizeColorlHist(self,frame):
for channel in range(3):
frame[:,:,channel] = cv2.equalizeHist(frame[:,:,channel])
return frame
def equalizeColorCLAHE(self,frame):
for channel in range(3):
frame[:,:,channel] = self.clahe.apply(frame[:,:,channel])
return frame
def equalizeColorSP(self,frame):
#https://stackoverflow.com/questions/24341114/simple-illumination-correction-in-images-opencv-c
SPChannels = cv2.cvtColor(frame, cv2.COLOR_RGB2YCrCb) # cv2.COLOR_RGB2LAB
#SPChannels[:,:,0] = self.clahe.apply(SPChannels[:,:,0])
SPChannels[:,:,0] = self.clahe.apply(SPChannels[:,:,0]) #cv2.equalizeHist(SPChannels[:,:,0])
equalizedImage = cv2.cvtColor(SPChannels, cv2.COLOR_YCrCb2RGB)
return equalizedImage
def balanceImage(self,image):
balancedImage = self.adjust_gamma(image)
#balancedImage = np.dstack([self.balanceChannelRange(channel) for channel in cv2.split(balancedImage)])
#balancedImage = self.simplest_cb(balancedImage)
#balancedImage = self.equalizeColorlHist(balancedImage)
#balancedImage = self.equalizeColorCLAHE(balancedImage)
balancedImage = self.equalizeColorSP(balancedImage)
#balancedImage = self.grayWorldBalance(balancedImage)
#cv.xphoto_WhiteBalancer.balanceWhite
#balancedImage = cca.grey_world(balancedImage)
return balancedImage
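# deconvolutionFilter precomputes motion-blur and defocus PSFs, pads them to the image size, takes the
# DFT of each and multiplies them into a combined OTF, from which a Wiener-style kernel is built;
# deconvoluteImage() then deblurs a frame by multiplying its spectrum with that kernel and inverting the DFT.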
class deconvolutionFilter:
def __init__(self, imageResolution, kernelSize = 5, angle = np.pi/6):
self.imageSize = imageResolution
self.psfKernSize = kernelSize
self.motionAngle = angle
self.zSize = 65
self.noise = 0
self.createKernels().computeKernelSpectra()
def blur_edge(self,imageToProcess):
d = self.psfKernSize
img_pad = cv2.copyMakeBorder(imageToProcess, d, d, d, d, cv2.BORDER_WRAP)
img_blur = cv2.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d,d:-d]
y, x = np.indices((self.imageSize[1], self.imageSize[0]))
dist = np.dstack([x, self.imageSize[0]-x-1, y, self.imageSize[1]-y-1]).min(-1)
w = np.minimum(np.float32(dist)/d, 1.0)
return imageToProcess*w + img_blur*(1-w)
def computeMotionKernel(self):
kern = np.ones((1, self.psfKernSize), np.float32)
c, s = np.cos(self.motionAngle), np.sin(self.motionAngle)
A = np.float32([[c, -s, 0], [s, c, 0]])
sz2 = self.zSize // 2
A[:,2] = (sz2, sz2) - np.dot(A[:,:2], ((self.psfKernSize-1)*0.5, 0))
self.motionKernel = cv2.warpAffine(kern, A, (self.zSize, self.zSize), flags = cv2.INTER_CUBIC)
return self
def computeDefocusKernel(self):
kern = np.zeros((self.zSize, self.zSize), np.uint8)
cv2.circle(kern, (self.zSize, self.zSize), self.psfKernSize, 255, -1, cv2.LINE_AA, shift=1)
self.defocusKernel = np.float32(kern) / 255.0
return self
def createKernels(self):
self.computeDefocusKernel()
self.computeMotionKernel()
self.motionKernel /= self.motionKernel.sum()
self.defocusKernel /= self.defocusKernel.sum()
psfPadDefocus = np.zeros((self.imageSize[0], self.imageSize[1]), np.float32)
psfPadMotion = psfPadDefocus.copy()  # use a separate buffer; aliasing would overwrite the first kernel
psfPadDefocus[:self.zSize, :self.zSize] = self.defocusKernel
psfPadMotion[:self.zSize, :self.zSize] = self.motionKernel
self.convolutionKernel = [psfPadDefocus, psfPadMotion]
return self
def preProcessDFT(self):
nrows = cv2.getOptimalDFTSize(self.imageSize[0])
ncols = cv2.getOptimalDFTSize(self.imageSize[1])
bottomPadding = nrows - self.imageSize[0]
rightPadding = ncols - self.imageSize[1]
self.paddingRanges = [rightPadding, bottomPadding]
return self
def computeKernelSpectra(self):
# https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/ImgProc/out_of_focus_deblur_filter/out_of_focus_deblur_filter.cpp
# https://docs.opencv.org/ref/master/de/d3c/tutorial_out_of_focus_deblur_filter.html
# https://github.com/lvxiaoxin/Wiener-filter/blob/master/main.py
# https://github.com/opencv/opencv/blob/master/samples/python/deconvolution.py
self.preProcessDFT()
psfOptSizeDefocus = cv2.copyMakeBorder(self.convolutionKernel[0], 0, self.paddingRanges[1], 0, self.paddingRanges[0], cv2.BORDER_CONSTANT, value = 0)
psfOptSizeMotion = cv2.copyMakeBorder(self.convolutionKernel[1], 0, self.paddingRanges[1], 0, self.paddingRanges[0], cv2.BORDER_CONSTANT, value = 0)
OTFDefocus = cv2.dft(psfOptSizeDefocus, flags = cv2.DFT_COMPLEX_OUTPUT, nonzeroRows = self.zSize)
OTFMotion = cv2.dft(psfOptSizeMotion, flags = cv2.DFT_COMPLEX_OUTPUT, nonzeroRows = self.zSize)
self.OTF = cv2.mulSpectrums(OTFDefocus,OTFMotion, False)
self.WignerKernel = self.OTF / ((self.OTF**2).sum(-1) + self.noise)[...,np.newaxis]
return self
def deconvoluteImage(self, imageToProcess):
imageOptPadding = cv2.copyMakeBorder(imageToProcess, 0, self.paddingRanges[1], 0, self.paddingRanges[0], cv2.BORDER_CONSTANT, value = 0)
imageSpectrum = cv2.dft(np.float32(imageOptPadding)/255.0, flags = cv2.DFT_COMPLEX_OUTPUT)
filteredSpectra = cv2.mulSpectrums(imageSpectrum, self.WignerKernel, False)
filteredImage = cv2.idft(filteredSpectra, flags = cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
filteredImage = np.roll(filteredImage, - self.zSize //2, 0)
filteredImage = np.roll(filteredImage, - self.zSize //2, 1)
self.filteredImage = np.uint8(255*filteredImage/filteredImage.max())  # scale to [0, 255] before casting to uint8
return self.filteredImage
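# imageEnhancing sharpens a single channel: a Gaussian blur feeds either a classic unsharp mask or a
# Laplacian-of-Gaussian mask, and the result can optionally be equalized globally or with CLAHE.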
class imageEnhancing:
def __init__(self, inputFrame, clipLimit = 1.5, tileGridSize=(4,4)):
# https://www.mathworks.com/discovery/image-enhancement.html
self.normalImage = inputFrame
self.unsharpMask = np.zeros((inputFrame.shape[0], inputFrame.shape[1]), np.uint8)
self.sharpenedImage = np.zeros((inputFrame.shape[0], inputFrame.shape[1]), np.uint8)
self.clahe = cv2.createCLAHE(clipLimit, tileGridSize)
def blurrImage(self, kSize = 5, sigma = 10):
# http://melvincabatuan.github.io/Image-Filtering/
self.blurredImage = cv2.GaussianBlur(self.normalImage, (kSize,kSize), sigma, sigma)
return self
def unsharpImage(self, sharpening):
#https://homepages.inf.ed.ac.uk/rbf/HIPR2/unsharp.htm
self.blurrImage()
cv2.addWeighted(self.normalImage, 1, self.blurredImage, -1, 0, self.unsharpMask)
cv2.addWeighted(self.normalImage, 1, self.unsharpMask, sharpening,0,self.sharpenedImage)
return self
def LOGImage(self):
self.blurrImage()
self.Laplacian = cv2.Laplacian(self.blurredImage, ddepth=cv2.CV_64F)
self.Laplacian = cv2.threshold(self.Laplacian, 0, 255.0, cv2.THRESH_TOZERO)[1]  # keep the thresholded result
self.Laplacian = np.uint8(self.Laplacian) # convert to uint8
return self
def unsharpLOGImage(self, sharpening):
self.LOGImage()
#cv2.addWeighted(self.normalImage, 1, self.Laplacian, -1, 0, self.unsharpMask)
cv2.addWeighted(self.normalImage, 1, self.Laplacian, sharpening,0,self.sharpenedImage)
return self
def equalizeImage(self):
self.sharpenedImage = cv2.equalizeHist(self.sharpenedImage)
return self
def applyCLAHE(self):
self.sharpenedImage = self.clahe.apply(self.sharpenedImage)
return self
def getProcImage(self):
return self.sharpenedImage
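# imageSegmentation turns the enhanced channel into candidate regions: Canny edges, global Otsu or
# adaptive thresholding (followed by an erosion to suppress noise), MSER or connected-component contours,
# and analyzeShape(), which keeps roughly square contours of plausible area and records their polygon
# approximation, moments, bounding box and centroid.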
class imageSegmentation:
def __init__(self, inputFrame, colorFrame = None, kMrpSize = 7,drawEnabled = False):
self.pipelineImage = inputFrame
self.drawEnabled = drawEnabled
if colorFrame is not None:
self.colorFlag = True
self.colorCopy = colorFrame
else:
self.colorFlag = False
self.colorCopy = inputFrame.copy()
self.morphKernel = np.ones((kMrpSize, kMrpSize), np.uint8)
pass
def detectEdges(self, sigma = 0, sobelAperture = 3):  # Canny's Sobel aperture must be 3, 5 or 7
medianValue = np.median(self.pipelineImage)
lowerThreshold = int(max(0, (1.0 - sigma) * medianValue))
upperThreshold = int(min(255, (1.0 + sigma) * medianValue))
self.binaryMap = cv2.Canny(self.pipelineImage, lowerThreshold, upperThreshold, apertureSize = sobelAperture)
#self.attackBlob()
return self
def createGloblBlobs(self,minTruncLevel = 70, maxTruncLevel = 190):
self.binaryMap = cv2.threshold(self.pipelineImage,minTruncLevel,\
maxTruncLevel, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
self.attackBlob()
return self
def createLocalBlobs(self,truncLevel = 100, blkSize = 13,levelOffset = 5):
self.binaryMap = cv2.adaptiveThreshold(self.pipelineImage,truncLevel,\
cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,blkSize,levelOffset)
self.attackBlob()
return self
def getMSERcontours(self):
# https://github.com/opencv/opencv/blob/master/samples/python/mser.py
localMser = cv2.MSER_create()
regions = np.array(localMser.detectRegions(self.pipelineImage)[0])
self.contours = [cv2.convexHull(p.reshape(-1,1,2)) for p in regions]
return self
def attackBlob(self):
self.binaryMap = cv2.erode(self.binaryMap,self.morphKernel) #cv2.morphologyEx(self.binaryMap, cv2.MORPH_OPEN, None)
return self
def analyzeShape(self):
# https://www.learnopencv.com/blob-detection-using-opencv-python-c/
# https://docs.opencv.org/3.0-beta/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html
polySidesData = []
polyHuMData = []
rectData = []
sideCounter = []
centroidData = []
for shape in self.contours:
hull = cv2.convexHull(shape)
approx = cv2.approxPolyDP(hull,0.04*cv2.arcLength(hull,True),True)
sidesApprox = len(approx)
if sidesApprox == 3:
polygon = "triangle"
elif sidesApprox == 4:
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
polygon = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
elif sidesApprox == 5:
polygon = "pentagon"
else:
polygon = "circle"
M = cv2.moments(hull)
# https://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html
if M["m00"] > 600 and M["m00"] < 40000:
cX = int((M["m10"] / M["m00"]))
cY = int((M["m01"] / M["m00"]))
x,y,w,h = cv2.boundingRect(hull)
aspect_ratio = float(w)/h
#https://docs.opencv.org/3.4.3/d1/d32/tutorial_py_contour_properties.html
if aspect_ratio < 1.3 and aspect_ratio > 0.7:
polySidesData.append(approx)
polyHuMData.append(M)
rectData.append([x, y, x + w, y + h])
sideCounter.append(sidesApprox)
centroidData.append([cX,cY])
if self.colorFlag:
cv2.rectangle(self.colorCopy,(x,y),(x+w,y+h),(0,255,0),2)
else:
cv2.rectangle(self.pipelineImage,(x,y),(x+w,y+h),(255,255,255),2)
cv2.drawContours(self.pipelineImage, [shape], -1, (0, 255, 0), 2)
cv2.putText(self.pipelineImage, polygon, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 255, 255), 2)
self.polygonData = [polySidesData, polyHuMData, rectData, sideCounter,centroidData]
return self
def getCCLedges(self, connectivity = 8):
self.connections = np.uint8(cv2.connectedComponents(cv2.Canny(self.binaryMap,0,255),connectivity)[1])
self.connections[self.connections > 0] = 255
return self
def getImageContours(self):
self.contours = cv2.findContours(self.binaryMap,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[1]
self.analyzeShape()
return self
def getCCLContours(self,connectivity = 8):
self.connections = np.uint8(cv2.connectedComponents(cv2.Canny(self.binaryMap,0,255),connectivity)[1])
self.connections[self.connections > 0] = 255
self.contours = cv2.findContours(self.connections,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[1]
self.analyzeShape()
return self
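# colorSegmentation holds per-color HSV in-range thresholds and produces binary masks, cleaning each
# mask with a morphological opening followed by a dilation.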
class colorSegmentation:
def __init__(self):
# Indices: 0 -> Blue_A, 1 -> Blue_B, 2 -> Red, 3 -> Yellow, 4 -> Green, 5 -> Orange
self.colorMinRanges = [(0,30,20), (175,30,20), (105,60,20),\
(60,45,20),(30,30,20), (90,30,20)]
self.colorMaxRanges = [(26,255,255), (180,255,255), (140,255,255),\
(105,255,255),(60,255,255), (105,255,255)]
self.morphKernel = np.ones((5,5), np.uint8)
pass
def maskThresholdColor(self,HSVframe,colorIndex):
self.singleColorMask = cv2.inRange(HSVframe, self.colorMinRanges[colorIndex], \
self.colorMaxRanges[colorIndex])
return self
def computeAllmasks(self,HSVframe):
self.colorMasks = [self.maskThresholdColor(HSVframe,colorsIndex).reduceNoise().getSingleMask() for colorsIndex in range(5)]
return self
def reduceNoise(self):
self.singleColorMask = cv2.morphologyEx(self.singleColorMask , cv2.MORPH_OPEN, self.morphKernel)
self.singleColorMask = cv2.dilate(self.singleColorMask,self.morphKernel)
return self
def getSingleMask(self):
return self.singleColorMask
def getAllMasks(self):
return self.colorMasks
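# CentroidTracker assigns persistent IDs to detected blobs: new centroids are matched to existing objects
# by nearest Euclidean distance (scipy cdist), unmatched objects are dropped after maxDisappeared missed
# frames, and unmatched detections are registered under new IDs.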
class CentroidTracker():
# https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/
# https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/
def __init__(self, maxDisappeared = 4):
self.nextObjectID = 0
self.objects = OrderedDict()
self.disappeared = OrderedDict()
self.maxDisappeared = maxDisappeared
def register(self, centroid, boxData,sides):
self.objects[self.nextObjectID] = [centroid,boxData,sides]
self.disappeared[self.nextObjectID] = 0
self.nextObjectID += 1
def deregister(self, objectID):
del self.objects[objectID]
del self.disappeared[objectID]
def update(self, inputCentroids,inputBoxData,inputSides):
centroidsCount = len(inputCentroids)
if centroidsCount == 0:
for objectID in self.disappeared.copy().keys():
self.disappeared[objectID] += 1
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
return self
if len(self.objects) == 0:
for i in range(0, centroidsCount):
self.register(inputCentroids[i],inputBoxData[i],inputSides[i])
else:
objectIDs = list(self.objects.keys())
objectCentroids = [row[0] for row in list(self.objects.values())]
D = dist.cdist(np.array(objectCentroids), inputCentroids)
rows = D.min(axis=1).argsort()
cols = D.argmin(axis=1)[rows]
usedRows = set()
usedCols = set()
for (row, col) in zip(rows, cols):
if row in usedRows or col in usedCols:
continue
objectID = objectIDs[row]
self.objects[objectID] = [inputCentroids[col],inputBoxData[col],inputSides[col]]
self.disappeared[objectID] = 0
usedRows.add(row)
usedCols.add(col)
unusedRows = set(range(0, D.shape[0])).difference(usedRows)
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
if D.shape[0] >= D.shape[1]:
for row in unusedRows:
objectID = objectIDs[row]
self.disappeared[objectID] += 1
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
else:
for col in unusedCols:
self.register(inputCentroids[col], inputBoxData[col], inputSides[col])
return self
def drawIndices(self,image):
boxData = []
objectLabel = []
contourSides = []
for (objectID, contourData) in self.objects.items():
text = "ID {}".format(objectID)
centroid = contourData[0]
boxData.append(contourData[1])
contourSides.append(contourData[2])
objectLabel.append(objectID)
try:
cv2.putText(image, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.circle(image, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
except:
print(centroid)
return image, objectLabel, boxData, contourSides
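# featuresMatcher verifies tracked candidates against a small template set: each ROI is described with ORB
# keypoints, an OpenCV HOG vector and normalized cross-correlation against the stored template image, and a
# candidate is only written to disk when all of the hand-tuned thresholds in matchFeatures() are satisfied.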
class featuresMatcher:
#https://docs.opencv.org/3.0-beta/modules/features2d/doc/feature_detection_and_description.html
#https://docs.opencv.org/3.4.3/d1/d89/tutorial_py_orb.html
#https://docs.opencv.org/3.3.0/db/d27/tutorial_py_table_of_contents_feature2d.html
#https://www.pyimagesearch.com/2014/04/07/building-pokedex-python-indexing-sprites-using-shape-descriptors-step-3-6/
def __init__(self, imagesPath, saveDirectories = None):
self.dataBasePath = imagesPath
if saveDirectories is None:
self.saveDir = [imagesPath + '/Red_Entries', imagesPath + '/Yellow_Entries']
else:
self.saveDir = saveDirectories
self.pickledDBPath = self.dataBasePath + "/DBFeatures.pck"
#Keypoint descriptors
self.ORBFeatures = cv2.ORB_create(nfeatures = 50, scaleFactor = 1.5,nlevels = 6, \
edgeThreshold = 51,firstLevel = 0, scoreType = 0,\
patchSize = 51)
#Texture descriptors
self.HOGFeatures = cv2.HOGDescriptor(_winSize=(10, 10), _blockSize=(8, 8),\
_blockStride=(1, 1),_cellSize=(8, 8),\
_nbins=9,_derivAperture = 1, _winSigma =-1,\
_histogramNormType = 0,_L2HysThreshold =0.2,\
_gammaCorrection =False,_nlevels =64)
self.ORBMatcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True)
self.HOGMatcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck = False)
self.previousLabels = []
self.dataSet = [[[] for x in range(3)] for y in range(3)]
pass
def cropImage(self,image,ROIBox):
image = image[ROIBox[1]:ROIBox[3], ROIBox[0]:ROIBox[2]]
return image
def describeFeatures(self,image,visualize = False):
# http://scikit-image.org/docs/dev/auto_examples/features_detection/plot_hog.html
# https://gurus.pyimagesearch.com/lesson-sample-histogram-of-oriented-gradients-and-car-logo-recognition/
# https://www.learnopencv.com/histogram-of-oriented-gradients/
image = cv2.resize(image, (100,100), interpolation = cv2.INTER_AREA)
HOGVector = self.HOGFeatures.compute(image,winStride = (8,8), padding = (8,8))
if visualize:
keyPoints, ORBDescript = self.ORBFeatures.detectAndCompute(image,None)
HOGImage = feature.hog(image, orientations=3, pixels_per_cell=(8, 8),\
cells_per_block=(4, 4), transform_sqrt = False, block_norm="L1",\
visualize = True, multichannel=True)[1]
HOGImage = np.uint8(cv2.normalize(HOGImage, None, 0, 255, cv2.NORM_MINMAX))
HOGImage = cv2.drawKeypoints(HOGImage, keyPoints, None, color=(0,255,0), flags=0)
return HOGImage, [keyPoints,ORBDescript], HOGVector
else:
ORBDescript = self.ORBFeatures.compute(image,None)
return image, ORBDescript, HOGVector
def batchExtractor(self,dataToSave):
# saving all our feature vectors in pickled file
with open(self.pickledDBPath, 'wb') as fp: pickle.dump(dataToSave, fp)
return self
def batchLoader(self):
with open(self.pickledDBPath,'rb') as fp: data = pickle.load(fp)
return data
def computeTemplateSet(self):
if not glob.glob(self.dataBasePath + '/*.pck'):
for (i,filename) in enumerate(sorted(glob.iglob(self.dataBasePath + '/*.jpg'))):
colorGroupIndex = int(filename.split('_')[2])
image = cv2.imread(filename)
image = cv2.resize(image, (100,100), interpolation = cv2.INTER_AREA)
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)[:,:,2]
if len(image.shape) != 3:
imageData, ORBData, HOGData = self.describeFeatures(image, visualize = False)
else:
imageData, ORBData, HOGData = self.describeFeatures(image, visualize = True)
self.dataSet[colorGroupIndex][0].append(image)
self.dataSet[colorGroupIndex][1].append(ORBData)
self.dataSet[colorGroupIndex][2].append(HOGData)
self.batchExtractor(self.dataSet)
else:
self.dataSet = self.batchLoader()
return self
def getTemplateIndex(self,colorGroup,vertexNum):
if colorGroup == 0:
if vertexNum == 8:
index = 1
elif vertexNum == 3:
index = 2
else:
index = 0
else:
index = 0
return index
def computeMatchesVar(self,dataMatches, dataRelRange = 0.6):
if dataMatches != []:
dataMatches = sorted(dataMatches, key = lambda x:x.distance)
distances = [o.distance for o in dataMatches[0:int(len(dataMatches)*dataRelRange)]]
variance = np.var(distances)
return variance
else:
return []
def computeHOGMetrics(self, HOGData1, HOGData2, dataRelRange = 1.0):
dataLimit = int(len(HOGData1)*dataRelRange)
HOGData1 = HOGData1[0:dataLimit]
HOGData2 = HOGData2[0:dataLimit]
directDifference = HOGData1 - HOGData2
# HOGMatches = self.HOGMatcher.match(HOGData1,HOGData2,None)
# distances = [o.distance for o in HOGMatches]
# distanceRMS = np.sqrt(np.mean(np.power(distances,2)))
normalizedDIST = np.var(directDifference)/(np.var(HOGData1) + np.var(HOGData2))
differenceRMS = np.sqrt(np.mean(np.power(directDifference,2)))
# relativeError = 1 - abs(np.mean((directDifference/HOGData2)))
cosineSimilarity = dist.cosine(HOGData1,HOGData2)
return [cosineSimilarity,differenceRMS,normalizedDIST,np.mean(directDifference)]
def matchFeatures(self,image,objectLabels,ROISets,colorGroup,contourSides,displayFrame):
# https://docs.opencv.org/3.4.3/dc/dc3/tutorial_py_matcher.html
if ROISets != []:
initvalidindex = sum(np.in1d(objectLabels,self.previousLabels))
self.previousLabels = objectLabels
inputLabelsLen = len(objectLabels)
for validLabel in range(initvalidindex,inputLabelsLen):
imageROI = self.cropImage(image,ROISets[validLabel])
if imageROI.size != 0:
index = self.getTemplateIndex(colorGroup, contourSides[validLabel])  # vertex count of this tracked contour
ORBData, HOGData = self.describeFeatures(imageROI)[1:3]
ORBMatches = self.ORBMatcher.knnMatch(ORBData[1],self.dataSet[colorGroup][1][index][1],50)
ORBVariance = self.computeMatchesVar(ORBMatches)
HOGMetrics = self.computeHOGMetrics(HOGData,self.dataSet[colorGroup][2][index])
convMatching = cv2.matchTemplate(cv2.resize(imageROI, (80,80), interpolation = cv2.INTER_AREA),self.dataSet[colorGroup][0][index],cv2.TM_CCORR_NORMED)
convMatching = cv2.normalize(convMatching, None, 0, 1, cv2.NORM_MINMAX,-1)
convMetric = convMatching.mean()
print('Index:', index, '-- Group:', colorGroup, '-- ID:', objectLabels[validLabel])
print('ORB:', ORBVariance)
print('HOG:', HOGMetrics)
print('CORR:', convMetric,'\r\n')
if convMetric >= 0.55 and HOGMetrics[0] > 0.3 and HOGMetrics[1] < 0.3 and HOGMetrics[2] > 0.8 and HOGMetrics[3] < 0.15:
# nameFilename = self.saveDir[colorGroup] + '/ID_' + str(objectLabels[validLabel]) + '_Group_' + str(colorGroup) + '.jpg'
nameFilename = os.path.join(self.saveDir[colorGroup],'ID_' + str(objectLabels[validLabel]) + '_Group_' + str(colorGroup) + '.jpg')
cv2.imwrite(nameFilename, self.cropImage(displayFrame,ROISets[validLabel]))
print('ID', objectLabels[validLabel], 'was saved\r\n')
else:
print('Index:', index, '-- Group:', colorGroup, '-- ID:', objectLabels[validLabel])
print('FAILED TO REGISTER\r\n')
for drawings in range(inputLabelsLen):
displayFrame = cv2.rectangle(displayFrame.copy(),(ROISets[drawings][0],ROISets[drawings][1]),(ROISets[drawings][2],ROISets[drawings][3]),(0,255,0),3)
return displayFrame
#%% Camera initialization
widthRes = 640
heightRes= 480
cameraStream = liveStreaming(0, widthRes, heightRes).startThread()
#%% Display Properties
cv2.namedWindow('Frame',cv2.WINDOW_NORMAL)
cv2.resizeWindow('Frame', widthRes,heightRes)
out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 15, (widthRes,heightRes))
#%% Target Directories
templateDirectory = '/home/pi/dataSet'
saveDirectories = [templateDirectory + '/Red_Entries', templateDirectory + '/Yellow_Entries']
#%% Initialize Features descriptors
colorGroup = [0,1]
colorThreshold = [2,3]
colorSetLength = len(colorGroup)
spatialMatcher = featuresMatcher(templateDirectory,saveDirectories).computeTemplateSet()
#%% Object tracker
blobTracker = CentroidTracker()
#%% Image Enhancers initialization
colorMask = np.zeros((heightRes,widthRes), dtype = "uint8")
#WignerFilter = deconvolutionFilter([heightRes,widthRes])
imageBalancer = colorEnhancing()
colorSegmentator = colorSegmentation()
time.sleep(1.0)
fps = FPS().start()
#%% Frames recording
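# Main acquisition loop: balance the frame, build an HSV mask per color group, segment and track the
# resulting blobs, run the feature matcher on newly tracked IDs, then record and display the annotated frame.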
while True:
frame = cameraStream.readFrame()
frame = imageBalancer.balanceImage(frame)
HSVFrame = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV) # cv2.COLOR_RGB2GRAY
#Vframe = WignerFilter.deconvoluteImage(Vframe)
HSVFrame[:,:,2] = cv2.medianBlur(HSVFrame[:,:,2],3)
preProcessor = imageEnhancing(HSVFrame[:,:,2]).unsharpImage(2).applyCLAHE()
processedVFrame = preProcessor.getProcImage()
for color in range(colorSetLength):
# time.sleep(0.025)
colorMask = colorSegmentator.maskThresholdColor(HSVFrame,colorThreshold[color]).reduceNoise().getSingleMask()
colorLimitedFrame = cv2.bitwise_and(processedVFrame, colorMask)
segmentator = imageSegmentation(colorLimitedFrame,drawEnabled = False).createGloblBlobs().getImageContours()
imageBorders = segmentator.pipelineImage
pipelineImage, validLabels, ROIs, contourSides = blobTracker.update(segmentator.polygonData[4],\
segmentator.polygonData[2],segmentator.polygonData[3]).drawIndices(imageBorders)
frame = spatialMatcher.matchFeatures(HSVFrame[:,:,2], validLabels,ROIs, colorGroup[color], contourSides, frame)
out.write(frame)
cv2.imshow("Frame", frame)
if cv2.waitKey(1) & 0xFF == ord('q'): # exit order
break
fps.update()
#name = 'frame_' + str(8) + '.jpg'
#cv2.imwrite(name, processedFrame)
#%% Release camera resources
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# When everything done, release the capture
cameraStream.streaming.release()
out.release()
cv2.destroyAllWindows()
|
train.py
|
#!/usr/bin/env python3
""" The script to run the training process of faceswap """
import logging
import os
import sys
from threading import Lock
from time import sleep
import cv2
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from lib.keypress import KBHit
from lib.multithreading import MultiThread
from lib.queue_manager import queue_manager
from lib.utils import cv2_read_img, get_folder, get_image_paths, set_system_verbosity
from plugins.plugin_loader import PluginLoader
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Train():
""" The training process. """
def __init__(self, arguments):
logger.debug("Initializing %s: (args: %s", self.__class__.__name__, arguments)
self.args = arguments
self.timelapse = self.set_timelapse()
self.images = self.get_images()
self.stop = False
self.save_now = False
self.preview_buffer = dict()
self.lock = Lock()
self.trainer_name = self.args.trainer
logger.debug("Initialized %s", self.__class__.__name__)
def set_timelapse(self):
""" Set timelapse paths if requested """
if (not self.args.timelapse_input_a and
not self.args.timelapse_input_b and
not self.args.timelapse_output):
return None
if not self.args.timelapse_input_a or not self.args.timelapse_input_b:
raise ValueError("To enable the timelapse, you have to supply "
"all the parameters (--timelapse-input-A and "
"--timelapse-input-B).")
timelapse_output = None
if self.args.timelapse_output is not None:
timelapse_output = str(get_folder(self.args.timelapse_output))
for folder in (self.args.timelapse_input_a,
self.args.timelapse_input_b,
timelapse_output):
if folder is not None and not os.path.isdir(folder):
raise ValueError("The Timelapse path '{}' does not exist".format(folder))
kwargs = {"input_a": self.args.timelapse_input_a,
"input_b": self.args.timelapse_input_b,
"output": timelapse_output}
logger.debug("Timelapse enabled: %s", kwargs)
return kwargs
def get_images(self):
""" Check the image dirs exist, contain images and return the image
objects """
logger.debug("Getting image paths")
images = dict()
for side in ("a", "b"):
image_dir = getattr(self.args, "input_{}".format(side))
if not os.path.isdir(image_dir):
logger.error("Error: '%s' does not exist", image_dir)
exit(1)
if not os.listdir(image_dir):
logger.error("Error: '%s' contains no images", image_dir)
exit(1)
images[side] = get_image_paths(image_dir)
logger.info("Model A Directory: %s", self.args.input_a)
logger.info("Model B Directory: %s", self.args.input_b)
logger.debug("Got image paths: %s", [(key, str(len(val)) + " images")
for key, val in images.items()])
return images
def process(self):
""" Call the training process object """
logger.debug("Starting Training Process")
logger.info("Training data directory: %s", self.args.model_dir)
set_system_verbosity(self.args.loglevel)
thread = self.start_thread()
# queue_manager.debug_monitor(1)
err = self.monitor(thread)
self.end_thread(thread, err)
logger.debug("Completed Training Process")
def start_thread(self):
""" Put the training process in a thread so we can keep control """
logger.debug("Launching Trainer thread")
thread = MultiThread(target=self.training)
thread.start()
logger.debug("Launched Trainer thread")
return thread
def end_thread(self, thread, err):
""" On termination output message and join thread back to main """
logger.debug("Ending Training thread")
if err:
msg = "Error caught! Exiting..."
log = logger.critical
else:
msg = ("Exit requested! The trainer will complete its current cycle, "
"save the models and quit (This can take a couple of minutes "
"depending on your training speed).")
if not self.args.redirect_gui:
msg += " If you want to kill it now, press Ctrl + c"
log = logger.info
log(msg)
self.stop = True
thread.join()
sys.stdout.flush()
logger.debug("Ended Training thread")
def training(self):
""" The training process to be run inside a thread """
try:
sleep(1) # Let preview instructions flush out to logger
logger.debug("Commencing Training")
logger.info("Loading data, this may take a while...")
if self.args.allow_growth:
self.set_tf_allow_growth()
model = self.load_model()
trainer = self.load_trainer(model)
self.run_training_cycle(model, trainer)
except KeyboardInterrupt:
try:
logger.debug("Keyboard Interrupt Caught. Saving Weights and exiting")
model.save_models()
trainer.clear_tensorboard()
except KeyboardInterrupt:
logger.info("Saving model weights has been cancelled!")
exit(0)
except Exception as err:
raise err
def load_model(self):
""" Load the model requested for training """
logger.debug("Loading Model")
model_dir = get_folder(self.args.model_dir)
configfile = self.args.configfile if hasattr(self.args, "configfile") else None
augment_color = not self.args.no_augment_color
model = PluginLoader.get_model(self.trainer_name)(
model_dir,
gpus=self.args.gpus,
configfile=configfile,
snapshot_interval=self.args.snapshot_interval,
no_logs=self.args.no_logs,
warp_to_landmarks=self.args.warp_to_landmarks,
augment_color=augment_color,
no_flip=self.args.no_flip,
training_image_size=self.image_size,
alignments_paths=self.alignments_paths,
preview_scale=self.args.preview_scale,
pingpong=self.args.pingpong,
memory_saving_gradients=self.args.memory_saving_gradients,
optimizer_savings=self.args.optimizer_savings,
predict=False)
logger.debug("Loaded Model")
return model
@property
def image_size(self):
""" Get the training set image size for storing in model data """
image = cv2_read_img(self.images["a"][0], raise_error=True)
size = image.shape[0]
logger.debug("Training image size: %s", size)
return size
@property
def alignments_paths(self):
""" Set the alignments path to input dirs if not provided """
alignments_paths = dict()
for side in ("a", "b"):
alignments_path = getattr(self.args, "alignments_path_{}".format(side))
if not alignments_path:
image_path = getattr(self.args, "input_{}".format(side))
alignments_path = os.path.join(image_path, "alignments.json")
alignments_paths[side] = alignments_path
logger.debug("Alignments paths: %s", alignments_paths)
return alignments_paths
def load_trainer(self, model):
""" Load the trainer requested for training """
logger.debug("Loading Trainer")
trainer = PluginLoader.get_trainer(model.trainer)
trainer = trainer(model,
self.images,
self.args.batch_size,
self.args.configfile)
logger.debug("Loaded Trainer")
return trainer
def run_training_cycle(self, model, trainer):
""" Perform the training cycle """
logger.debug("Running Training Cycle")
if self.args.write_image or self.args.redirect_gui or self.args.preview:
display_func = self.show
else:
display_func = None
for iteration in range(0, self.args.iterations):
logger.trace("Training iteration: %s", iteration)
save_iteration = iteration % self.args.save_interval == 0
viewer = display_func if save_iteration or self.save_now else None
timelapse = self.timelapse if save_iteration else None
trainer.train_one_step(viewer, timelapse)
if self.stop:
logger.debug("Stop received. Terminating")
break
if save_iteration:
logger.trace("Save Iteration: (iteration: %s", iteration)
if self.args.pingpong:
model.save_models()
trainer.pingpong.switch()
else:
model.save_models()
elif self.save_now:
logger.trace("Save Requested: (iteration: %s", iteration)
model.save_models()
self.save_now = False
logger.debug("Training cycle complete")
model.save_models()
trainer.clear_tensorboard()
self.stop = True
def monitor(self, thread):
""" Monitor the console, and generate + monitor preview if requested """
is_preview = self.args.preview
logger.debug("Launching Monitor")
logger.info("R|===================================================")
logger.info("R| Starting")
if is_preview:
logger.info("R| Using live preview")
logger.info("R| Press '%s' to save and quit",
"Terminate" if self.args.redirect_gui else "ENTER")
if not self.args.redirect_gui:
logger.info("R| Press 'S' to save model weights immediately")
logger.info("R|===================================================")
keypress = KBHit(is_gui=self.args.redirect_gui)
err = False
while True:
try:
if is_preview:
with self.lock:
for name, image in self.preview_buffer.items():
cv2.imshow(name, image) # pylint: disable=no-member
cv_key = cv2.waitKey(1000) # pylint: disable=no-member
else:
cv_key = None
if thread.has_error:
logger.debug("Thread error detected")
err = True
break
if self.stop:
logger.debug("Stop received")
break
# Preview Monitor
if is_preview and (cv_key == ord("\n") or cv_key == ord("\r")):
logger.debug("Exit requested")
break
if is_preview and cv_key == ord("s"):
logger.info("Save requested")
self.save_now = True
# Console Monitor
if keypress.kbhit():
console_key = keypress.getch()
if console_key in ("\n", "\r"):
logger.debug("Exit requested")
break
if console_key in ("s", "S"):
logger.info("Save requested")
self.save_now = True
sleep(1)
except KeyboardInterrupt:
logger.debug("Keyboard Interrupt received")
break
keypress.set_normal_term()
logger.debug("Closed Monitor")
return err
@staticmethod
def keypress_monitor(keypress_queue):
""" Monitor stdin for keypress """
while True:
keypress_queue.put(sys.stdin.read(1))
@staticmethod
def set_tf_allow_growth():
""" Allow TensorFlow to manage VRAM growth """
# pylint: disable=no-member
logger.debug("Setting Tensorflow 'allow_growth' option")
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = "0"
set_session(tf.Session(config=config))
logger.debug("Set Tensorflow 'allow_growth' option")
def show(self, image, name=""):
""" Generate the preview and write preview file output """
logger.trace("Updating preview: (name: %s)", name)
try:
scriptpath = os.path.realpath(os.path.dirname(sys.argv[0]))
if self.args.write_image:
logger.trace("Saving preview to disk")
img = "training_preview.jpg"
imgfile = os.path.join(scriptpath, img)
cv2.imwrite(imgfile, image) # pylint: disable=no-member
logger.trace("Saved preview to: '%s'", img)
if self.args.redirect_gui:
logger.trace("Generating preview for GUI")
img = ".gui_training_preview.jpg"
imgfile = os.path.join(scriptpath, "lib", "gui",
".cache", "preview", img)
cv2.imwrite(imgfile, image) # pylint: disable=no-member
logger.trace("Generated preview for GUI: '%s'", img)
if self.args.preview:
logger.trace("Generating preview for display: '%s'", name)
with self.lock:
self.preview_buffer[name] = image
logger.trace("Generated preview for display: '%s'", name)
except Exception as err:
logging.error("could not preview sample")
raise err
logger.trace("Updated preview: (name: %s)", name)
|
lldb_batchmode.py
|
# This script allows LLDB to be used in a way similar to GDB's batch mode. That is, given a text file
# containing LLDB commands (one command per line), this script will execute the commands one after
# the other.
# LLDB also has the -s and -S command-line options, which likewise execute a list of commands from a
# text file. However, those commands are executed `immediately`: the command following a `run` or
# `continue` command will be executed immediately after the `run` or `continue`, without waiting for
# the next breakpoint to be hit. Thus a command sequence like the following will not yield reliable
# results:
#
# break 11
# run
# print x
#
# Most of the time the `print` command will be executed while the program is still running and will
# thus fail. Using this Python script, the above will work as expected.
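# A typical invocation (the paths below are only illustrative):
#
#     python lldb_batchmode.py ./target/debug/my_program commands.txt
#
# where commands.txt contains one LLDB command per line, e.g. the break/run/print sequence above.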
from __future__ import print_function
import lldb
import os
import sys
import threading
import thread
import re
import time
# Set this to True for additional output
DEBUG_OUTPUT = False
def print_debug(s):
"""Print something if DEBUG_OUTPUT is True"""
global DEBUG_OUTPUT
if DEBUG_OUTPUT:
print("DEBUG: " + str(s))
def normalize_whitespace(s):
"""Replace newlines, tabs, multiple spaces, etc with exactly one space"""
return re.sub("\s+", " ", s)
def breakpoint_callback(frame, bp_loc, dict):
"""This callback is registered with every breakpoint and makes sure that the
frame containing the breakpoint location is selected"""
print("Hit breakpoint " + str(bp_loc))
# Select the frame and the thread containing it
frame.thread.process.SetSelectedThread(frame.thread)
frame.thread.SetSelectedFrame(frame.idx)
# Returning True means that we actually want to stop at this breakpoint
return True
# This is a list of breakpoints that are not registered with the breakpoint callback. The list is
# populated by the breakpoint listener and checked/emptied whenever a command has been executed
new_breakpoints = []
# This set contains all breakpoint ids that have already been registered with a callback, and is
# used to avoid hooking callbacks into breakpoints more than once
registered_breakpoints = set()
def execute_command(command_interpreter, command):
"""Executes a single CLI command"""
global new_breakpoints
global registered_breakpoints
res = lldb.SBCommandReturnObject()
print(command)
command_interpreter.HandleCommand(command, res)
if res.Succeeded():
if res.HasResult():
print(normalize_whitespace(res.GetOutput() or ''), end='\n')
# If the command introduced any breakpoints, make sure to register
# them with the breakpoint
# callback
while len(new_breakpoints) > 0:
res.Clear()
breakpoint_id = new_breakpoints.pop()
if breakpoint_id in registered_breakpoints:
print_debug("breakpoint with id %s is already registered. Ignoring." %
str(breakpoint_id))
else:
print_debug("registering breakpoint callback, id = " + str(breakpoint_id))
callback_command = ("breakpoint command add -F breakpoint_callback " +
str(breakpoint_id))
command_interpreter.HandleCommand(callback_command, res)
if res.Succeeded():
print_debug("successfully registered breakpoint callback, id = " +
str(breakpoint_id))
registered_breakpoints.add(breakpoint_id)
else:
print("Error while trying to register breakpoint callback, id = " +
str(breakpoint_id))
else:
print(res.GetError())
def start_breakpoint_listener(target):
"""Listens for breakpoints being added and adds new ones to the callback
registration list"""
listener = lldb.SBListener("breakpoint listener")
def listen():
event = lldb.SBEvent()
try:
while True:
if listener.WaitForEvent(120, event):
if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \
lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \
lldb.eBreakpointEventTypeAdded:
global new_breakpoints
breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event)
print_debug("breakpoint added, id = " + str(breakpoint.id))
new_breakpoints.append(breakpoint.id)
except:
print_debug("breakpoint listener shutting down")
# Start the listener and let it run as a daemon
listener_thread = threading.Thread(target=listen)
listener_thread.daemon = True
listener_thread.start()
# Register the listener with the target
target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged)
def start_watchdog():
"""Starts a watchdog thread that will terminate the process after a certain
period of time"""
watchdog_start_time = time.clock()
watchdog_max_time = watchdog_start_time + 30
def watchdog():
while time.clock() < watchdog_max_time:
time.sleep(1)
print("TIMEOUT: lldb_batchmode.py has been running for too long. Aborting!")
thread.interrupt_main()
# Start the listener and let it run as a daemon
watchdog_thread = threading.Thread(target=watchdog)
watchdog_thread.daemon = True
watchdog_thread.start()
####################################################################################################
# ~main
####################################################################################################
if len(sys.argv) != 3:
print("usage: python lldb_batchmode.py target-path script-path")
sys.exit(1)
target_path = sys.argv[1]
script_path = sys.argv[2]
print("LLDB batch-mode script")
print("----------------------")
print("Debugger commands script is '%s'." % script_path)
print("Target executable is '%s'." % target_path)
print("Current working directory is '%s'" % os.getcwd())
# Start the timeout watchdog
start_watchdog()
# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
# When we step or continue, don't return from the function until the process
# stops. We do this by setting the async mode to false.
debugger.SetAsync(False)
# Create a target from a file and arch
print("Creating a target for '%s'" % target_path)
target_error = lldb.SBError()
target = debugger.CreateTarget(target_path, None, None, True, target_error)
if not target:
print("Could not create debugging target '" + target_path + "': " +
str(target_error) + ". Aborting.", file=sys.stderr)
sys.exit(1)
# Register the breakpoint callback for every breakpoint
start_breakpoint_listener(target)
command_interpreter = debugger.GetCommandInterpreter()
try:
script_file = open(script_path, 'r')
for line in script_file:
command = line.strip()
if command == "run" or command == "r" or re.match("^process\s+launch.*", command):
# Before starting to run the program, let the thread sleep a bit, so all
# breakpoint added events can be processed
time.sleep(0.5)
if command != '':
execute_command(command_interpreter, command)
except IOError as e:
print("Could not read debugging script '%s'." % script_path, file=sys.stderr)
print(e, file=sys.stderr)
print("Aborting.", file=sys.stderr)
sys.exit(1)
finally:
debugger.Terminate()
script_file.close()
|
server.py
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Simple REST server that takes commands in a JSON payload
import json
import os
import re
import atexit
import mimetypes
import tornado.ioloop
import tornado.web
import tornado.httpclient
import tornado.httpserver
import luigi.scheduler as scheduler
import luigi.interface as interface
import pkg_resources
try:
import pygraphviz
except:
pass
import signal
from cStringIO import StringIO
from luigi.rpc import RemoteSchedulerResponder
from luigi.scheduler import PENDING, DONE, FAILED, RUNNING
def _create_scheduler():
config = interface.get_config()
retry_delay = config.getfloat('scheduler', 'retry-delay', 900.0)
remove_delay = config.getfloat('scheduler', 'remove-delay', 600.0)
worker_disconnect_delay = config.getfloat('scheduler', 'worker-disconnect-delay', 60.0)
return scheduler.CentralPlannerScheduler(retry_delay, remove_delay, worker_disconnect_delay)
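# RPCHandler exposes the scheduler over HTTP: a GET to /api/<method> with a JSON-encoded "data" argument
# is dispatched to the matching method on RemoteSchedulerResponder and the result is returned wrapped in a
# {"response": ...} dictionary; unknown methods yield a 400 error.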
class RPCHandler(tornado.web.RequestHandler):
""" Handle remote scheduling calls using rpc.RemoteSchedulerResponder"""
scheduler = _create_scheduler()
api = RemoteSchedulerResponder(scheduler)
def get(self, method):
payload = self.get_argument('data', default="{}")
arguments = json.loads(payload)
if hasattr(self.api, method):
result = getattr(self.api, method)(**arguments)
self.write({"response": result}) # wrap all json response in a dictionary
else:
self.send_error(400)
class TableVisualizeHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
client = tornado.httpclient.AsyncHTTPClient()
# TODO: use rpc module instead of hardcoded graph
client.fetch("http://localhost:8082/api/graph", self.in_table)
def in_table(self, graph_response):
if graph_response.error is not None:
print "Got error from API server"
graph_response.rethrow()
if graph_response.code != 200:
print "Got response code %s from API server" % graph_response.code
self.send_error(graph_response.code)
tasks = json.loads(graph_response.body)["response"]
n_nodes = 0
# TODO: tasks need to be grouped by sample (group_by function)
# In other words, tasks *always* have to be associated
# with sample names in production
task_header = ["Project", "Flowcell", "Sample"]
status_line = ["J.Doe_00_01", "NA", "NA"]
colors = {PENDING: ('white', 'black'),
DONE: ('green', 'white'),
FAILED: ('red', 'white'),
RUNNING: ('blue', 'white'),
'BROKEN': ('orange', 'black'), # external task, can't run
}
for task, p in tasks.iteritems():
selector = p['status']
if selector == PENDING and not p['workers']:
selector = 'BROKEN'
label = task.replace('(', '\\n(').replace(',', ',\\n') # force GraphViz to break lines
taskname = task.split("(")[0]
task_header.append(taskname)
status_line.append(selector)
self.write("<table border=1 cellpadding=10>\n<tr>\n")
self.write(" ".join(["<th>{}</th>".format(x) for x in task_header]))
self.write("\n</tr><tr>\n")
self.write(" ".join(["<td bgcolor={}>{}</td>".format(colors.get(x, ('white', 'white'))[0], x) for x in status_line]))
self.write("\n</tr></table>")
# TODO: this code definitely should not live here:
html_header = pkg_resources.resource_string(__name__, 'static/header.html')
self.finish()
class VisualizeHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
client = tornado.httpclient.AsyncHTTPClient()
# TODO: use rpc module instead of hardcoded graph
client.fetch("http://localhost:8082/api/graph", self.on_graph)
def on_graph(self, graph_response):
""" TODO: clean this up using templates """
if graph_response.error is not None:
print "Got error from API server"
graph_response.rethrow()
if graph_response.code != 200:
print "Got response code %s from API server" % graph_response.code
self.send_error(graph_response.code)
# TODO: figure out the interface for this
# TODO: if there are too many nodes, we need to prune the view
# One idea: do a Dijkstra from all running nodes. Hide all nodes
# with distance >= 50.
tasks = json.loads(graph_response.body)["response"]
graphviz = pygraphviz.AGraph(directed=True, size=12)
n_nodes = 0
colors = {PENDING: ('white', 'black'),
DONE: ('green', 'white'),
FAILED: ('red', 'white'),
RUNNING: ('blue', 'white'),
'BROKEN': ('orange', 'black'), # external task, can't run
}
for task, p in tasks.iteritems():
selector = p['status']
if selector == PENDING and not p['workers']:
selector = 'BROKEN'
fillcolor = colors[selector][0]
fontcolor = colors[selector][1]
shape = 'box'
w = None
# Task is a unicode object representing the task_id
if re.search('use_long_names=True', task):
label = task.replace('(', '\\n(').replace(',', ',\\n') # force GraphViz to break lines
else:
label = task.replace('(', '\\n(').replace(',', ',\\n').split('\\n')[0]
m = re.search('target=([0-9a-zA-Z_\./\-]*),', task)
if m and re.search('use_target_names=True', task):
label = os.path.basename(m.group(1))
# FIXME: this is arbitrary; cannot get the width to work properly in GraphViz
w = len(label.encode('utf-8'))/72*12
colors = {PENDING: ('white', 'black'),
DONE: ('white', 'black'),
FAILED: ('white', 'black'),
RUNNING: ('white', 'black'),
'BROKEN': ('orange', 'black'), # external task, can't run
}
fillcolor = colors[selector][0]
fontcolor = colors[selector][1]
# TODO: if the ( or , is a part of the argument we shouldn't really break it
# TODO: FIXME: encoding strings is not compatible with newer pygraphviz
if w:
graphviz.add_node(task.encode('utf-8'), label=label.encode('utf-8'), style='filled', fillcolor=fillcolor, fontcolor=fontcolor, shape=shape, fontname='Helvetica', fontsize=11, width=w, fixedsize=True)
else:
graphviz.add_node(task.encode('utf-8'), label=label.encode('utf-8'), style='filled', fillcolor=fillcolor, fontcolor=fontcolor, shape=shape, fontname='Helvetica', fontsize=11)
n_nodes += 1
for task, p in tasks.iteritems():
for dep in p['deps']:
label = ""
if re.search('use_target_names=True', task):
label = task.replace('(', '\\n(').replace(',', ',\\n').split('\\n')[0]
graphviz.add_edge(dep, task, label=label)
if n_nodes < 200:
graphviz.layout('dot')
else:
# stupid workaround...
graphviz.layout('fdp')
graphviz.draw("graph.svg", format='svg')
graphviz.write("graph.dot")
s = StringIO()
graphviz.draw(s, format='svg')
s.seek(0)
svg = s.read()
# TODO: this code definitely should not live here:
html_header = pkg_resources.resource_string(__name__, 'static/header.html')
pattern = r'(<svg.*?)(<g id="graph[01]".*?)(</svg>)'
mo = re.search(pattern, svg, re.S)
self.write(''.join([html_header,
mo.group(1),
'<g id="viewport">',
mo.group(2),
'</g>',
mo.group(3),
"</body></html>"]))
self.finish()
class StaticFileHandler(tornado.web.RequestHandler):
def get(self, path):
# TODO: this is probably not the right way to do it...
# TODO: security
extension = os.path.splitext(path)[1]
if extension in mimetypes.types_map:
self.set_header("Content-Type", mimetypes.types_map[extension])
data = pkg_resources.resource_string(__name__, os.path.join("static", path))
self.write(data)
def apps(debug):
api_app = tornado.web.Application([
(r'/api/(.*)', RPCHandler),
], debug=debug)
visualizer_app = tornado.web.Application([
(r'/static/(.*)', StaticFileHandler),
(r'/', VisualizeHandler)
], debug=debug)
table_app = tornado.web.Application([
(r'/static/(.*)', StaticFileHandler),
(r'/', TableVisualizeHandler)
], debug=debug)
return api_app, visualizer_app, table_app
def run(visualizer_processes=1, visualizer_port=8081, api_port=8082, table_visualizer_port=8083):
""" Runs one instance of the API server and <visualizer_processes> visualizer servers
"""
import luigi.process as process
api_app, visualizer_app, table_app = apps(debug=False)
visualizer_sockets = tornado.netutil.bind_sockets(visualizer_port)
api_sockets = tornado.netutil.bind_sockets(api_port)
table_sockets = tornado.netutil.bind_sockets(table_visualizer_port)
proc, attempt = process.fork_linked_workers(1 + visualizer_processes)
if proc == 0:
# first process is API server
if attempt != 0:
print "API instance died. Will not restart."
exit(0) # will not be restarted if it dies, as it indicates an issue that should be fixed
print "Launching API instance"
# load scheduler state
RPCHandler.scheduler.load()
# prune work DAG every 10 seconds
pruner = tornado.ioloop.PeriodicCallback(RPCHandler.scheduler.prune, 10000)
pruner.start()
def shutdown_handler(foo=None, bar=None):
print "api instance shutting down..."
RPCHandler.scheduler.dump()
os._exit(0)
server = tornado.httpserver.HTTPServer(api_app)
server.add_sockets(api_sockets)
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)
signal.signal(signal.SIGQUIT, shutdown_handler)
atexit.register(shutdown_handler)
elif proc != 0:
# visualizers can die and will be restarted
print "Launching Visualizer instance %d (attempt %d)" % (proc, attempt)
server = tornado.httpserver.HTTPServer(visualizer_app)
server.add_sockets(visualizer_sockets)
server2 = tornado.httpserver.HTTPServer(table_app)
server2.add_sockets(table_sockets)
tornado.ioloop.IOLoop.instance().start()
def run_api_threaded(api_port=8082):
''' For unit tests'''
api_app, visualizer_app, table_app = apps(debug=False)
api_sockets = tornado.netutil.bind_sockets(api_port)
server = tornado.httpserver.HTTPServer(api_app)
server.add_sockets(api_sockets)
def run_tornado():
tornado.ioloop.IOLoop.instance().start()
import threading
threading.Thread(target=run_tornado).start()
def stop():
tornado.ioloop.IOLoop.instance().stop()
if __name__ == "__main__":
run()
|
base.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import webbrowser
import multiprocessing
import logging
from abc import ABCMeta, abstractmethod
from ava.runtime import settings
STR_STATUS = 'Ava - running...'
STR_OPEN_WEBFRONT = u'Open Webfront'
STR_EXIT = u'Quit Ava'
logger = logging.getLogger(__name__)
def start_server_process():
"""
Starts the server in the forked process.
:return: None
"""
from ava.core.agent import start_agent
start_agent()
class Supervisor(object):
"""
Responsible for monitor and manage the server process.
"""
def __init__(self, target=start_server_process):
self.server_process = None
self.restarted_times = 0
self.target = target
def start_server(self):
if self.is_server_running():
return
self.server_process = multiprocessing.Process(target=self.target)
self.server_process.start()
def stop_server(self):
if self.server_process is not None:
self.server_process.terminate()
self.server_process.join()
self.server_process = None
def is_server_running(self):
return self.server_process is not None
def health_check(self):
"""
:return True if everything is OK.
"""
if not self.is_server_running():
logger.debug("Server is not running, no check.")
return
#logger.debug("Doing check.")
exitcode = self.server_process.exitcode
if exitcode is None:
return True
if exitcode < 0 or exitcode == 1:
if self.restarted_times > 5:
logger.error("Server restarted more than 5 times, give up!")
self.server_process = None
return False
self.server_process = multiprocessing.Process(target=start_server_process)
self.server_process.start()
self.restarted_times += 1
logger.warning("Server process restarted.")
return True
else:
logger.info("Server process exited.")
self.server_process = None
return False
class ShellBase(object):
"""
Base class for Shell implementations.
Shell is responsible for launching server process and provides machinery for the user to stop it.
"""
__metaclass__ = ABCMeta
def __init__(self):
port = settings['webfront']['listen_port']
self.base_url = "http://127.0.0.1:%d/" % (port,)
self.supervisor = Supervisor()
self.shell_stopped = False
def open_main_ui(self):
webbrowser.open(self.base_url)
def is_server_running(self):
return self.supervisor.is_server_running()
def set_base_url(self, url):
self.base_url = url
def start_server(self):
self.supervisor.start_server()
def stop_server(self):
self.supervisor.stop_server()
def check_server(self):
"""
Checks if the server process is running.
"""
#logger.debug("Checking server status...")
if not self.supervisor.health_check():
self.shell_stopped = True
def _on_idle(self):
"""
Subclass should run this method from time to time.
"""
self.check_server()
def do_run(self):
self.start_server()
try:
self.run()
logger.debug("Shell is stopping.")
finally:
logger.debug("Stopping server")
self.stop_server()
@abstractmethod
def run(self):
"""
Subclass must implement this method to launch the shell.
"""
|
test_gluon_model_zoo.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_model
import sys
import multiprocessing
import pytest
mx.npx.reset_np()
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
@pytest.mark.parametrize('model_name', [
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'vgg11', 'vgg13', 'vgg16', 'vgg19',
'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
'alexnet', 'inceptionv3',
'densenet121', 'densenet161', 'densenet169', 'densenet201',
'squeezenet1.0', 'squeezenet1.1',
'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25',
'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25'
])
def test_models(model_name):
pretrained_to_test = set(['mobilenetv2_0.25'])
test_pretrain = model_name in pretrained_to_test
model = get_model(model_name, pretrained=test_pretrain, root='model/')
data_shape = (2, 3, 224, 224) if 'inception' not in model_name else (2, 3, 299, 299)
eprint('testing forward for %s' % model_name)
print(model)
if not test_pretrain:
model.initialize()
model(mx.np.random.uniform(size=data_shape)).wait_to_read()
def parallel_download(model_name):
model = get_model(model_name, pretrained=True, root='./parallel_download')
print(type(model))
@pytest.mark.skip(reason='MXNet is not yet safe for forking. Tracked in #17782.')
def test_parallel_download():
processes = []
name = 'mobilenetv2_0.25'
for _ in range(10):
p = multiprocessing.Process(target=parallel_download, args=(name,))
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
|
gdaltest_python2.py
|
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Python Library supporting GDAL/OGR Test Suite
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import urllib2
import socket
import os
import sys
from sys import version_info
from Queue import Queue
from threading import Thread
def run_func(func):
try:
result = func()
print(result)
return result
except SystemExit, x:
import traceback
traceback.print_exc()
raise x
except:
result = 'fail (blowup)'
print(result)
import traceback
traceback.print_exc()
return result
def urlescape(url):
# Escape any non-ASCII characters
try:
import urllib
url = urllib.quote(url)
except:
pass
return url
def gdalurlopen(url):
timeout = 10
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
if 'GDAL_HTTP_PROXY' in os.environ:
proxy = os.environ['GDAL_HTTP_PROXY']
if 'GDAL_HTTP_PROXYUSERPWD' in os.environ:
proxyuserpwd = os.environ['GDAL_HTTP_PROXYUSERPWD']
proxyHandler = urllib2.ProxyHandler({"http" : \
"http://%s@%s" % (proxyuserpwd, proxy)})
else:
proxyuserpwd = None
proxyHandler = urllib2.ProxyHandler({"http" : \
"http://%s" % (proxy)})
opener = urllib2.build_opener(proxyHandler, urllib2.HTTPHandler)
urllib2.install_opener(opener)
try:
handle = urllib2.urlopen(url)
socket.setdefaulttimeout(old_timeout)
return handle
except urllib2.HTTPError, e:
print('HTTP service for %s is down (HTTP Error: %d)' % (url, e.code))
socket.setdefaulttimeout(old_timeout)
return None
except urllib2.URLError, e:
print('HTTP service for %s is down (HTTP Error: %s)' % (url, e.reason))
socket.setdefaulttimeout(old_timeout)
return None
except:
print('HTTP service for %s is down.' %(url))
socket.setdefaulttimeout(old_timeout)
return None
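# Illustrative caller pattern (an assumption, not code from the test suite):
# gdalurlopen() returns None when the service is unreachable, so callers are
# expected to guard against that before reading:
#   handle = gdalurlopen('http://example.com/resource')
#   if handle is None:
#       print('skip: HTTP service unavailable')
#   else:
#       data = handle.read()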
def warn_if_memleak(cmd, out_str):
# If DEBUG_VSIMALLOC_STATS is defined, this is an easy way
# to catch some memory leaks
if cmd.find('--utility_version') == -1 and \
out_str.find('VSIMalloc + VSICalloc - VSIFree') != -1 and \
out_str.find('VSIMalloc + VSICalloc - VSIFree : 0') == -1:
print('memory leak detected')
print(out_str)
def spawn_async26(cmd):
import shlex
import subprocess
command = shlex.split(cmd)
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE)
return (process, process.stdout)
except:
return (None, None)
def spawn_async(cmd):
if version_info >= (2,6,0):
return spawn_async26(cmd)
import popen2
try:
process = popen2.Popen3(cmd)
except:
return (None, None)
if process is None:
return (None, None)
process.tochild.close()
return (process, process.fromchild)
def wait_process(process):
process.wait()
def runexternal(cmd, strin = None, check_memleak = True, display_live_on_parent_stdout = False):
if strin is None:
ret_stdout = os.popen(cmd)
else:
(ret_stdin, ret_stdout) = os.popen2(cmd)
ret_stdin.write(strin)
ret_stdin.close()
if display_live_on_parent_stdout:
out_str = ''
while True:
c = ret_stdout.read(1)
if c == '':
break
out_str = out_str + c
sys.stdout.write(c)
ret_stdout.close()
else:
out_str = ret_stdout.read()
ret_stdout.close()
if check_memleak:
warn_if_memleak(cmd, out_str)
return out_str
def read_in_thread(f, q):
q.put(f.read())
f.close()
def runexternal_out_and_err(cmd, check_memleak = True):
(ret_stdin, ret_stdout, ret_stderr) = os.popen3(cmd)
ret_stdin.close()
q_stdout = Queue()
t_stdout = Thread(target=read_in_thread, args=(ret_stdout, q_stdout))
q_stderr = Queue()
t_stderr = Thread(target=read_in_thread, args=(ret_stderr, q_stderr))
t_stdout.start()
t_stderr.start()
out_str = q_stdout.get()
err_str = q_stderr.get()
if check_memleak:
warn_if_memleak(cmd, out_str)
return (out_str, err_str)
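# Design note (added comment): stdout and stderr are drained on separate
# threads because a child process that fills one pipe while the parent blocks
# reading the other would deadlock. A typical, purely illustrative call:
#   out, err = runexternal_out_and_err('gdalinfo --version')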
|
__init__.py
|
import re
import types
import sys
import os
import io
from pathlib import Path
import tempfile
import random
import json
import asyncio
import shutil
import logging
from datetime import datetime
import threading
import click
from girder_client import GirderClient
from faker import Faker
from faker.providers import internet
from flask import Flask, send_from_directory, jsonify
log = logging.getLogger('adash')
fake = Faker()
fake.add_provider(internet)
machines = ['summit', 'titan', 'frontier']
async def add_shot_entry(output_path, shot_entry):
log.info('Updating shots/index.json')
shots_path = output_path / 'shots'
shots_index_path = shots_path / 'index.json'
shots_index = []
if shots_index_path.exists():
with shots_index_path.open() as f:
shots_index = json.load(f)
shots_index.append(shot_entry)
with shots_index_path.open('w') as f:
json.dump(shots_index, f)
async def update_timestep(output_path, shot_name, run_name, timestep, final_step):
time_path = output_path / 'shots' / shot_name / run_name / 'time.json'
    log.info('Updating "%s" timestep="%d"' % (
        os.path.join('shots', shot_name, run_name, 'time.json'), timestep))
    # The file is simply rewritten: the caller always supplies the timestep to
    # record, so there is nothing to read back or increment.
    with time_path.open('w') as f:
        time = {
            'current': timestep
        }
        if final_step:
            time['complete'] = True
        json.dump(time, f)
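# Illustrative only: for a run whose final step is timestep 3, the resulting
# time.json would contain
#   {"current": 3, "complete": true}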
async def mock_run(images_path, output_path, shot, run_name, username, run_interval, timestep_interval, machine):
create_time = random.randrange(run_interval)
log.info('Starting run "%s" in %s seconds.' % (run_name, create_time))
await asyncio.sleep(create_time)
shot_entry = {
'shot_name': shot,
'run_name': run_name,
'username': username,
'machine_name': machine,
'date': datetime.now().isoformat()
}
await add_shot_entry(output_path, shot_entry)
log.info('Starting run')
log.info(json.dumps(shot_entry, indent=2))
timestep = 1
while True:
await asyncio.sleep(timestep_interval)
timestep_path = images_path / str(timestep)
next_timestep_path = images_path / str(timestep + 1)
final_step = False
if not next_timestep_path.exists():
# This is the last timestep!
final_step = True
target_run_path = output_path / 'shots' / shot / run_name / str(timestep)
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, shutil.copytree, timestep_path, target_run_path)
await update_timestep(output_path, shot, run_name, timestep, final_step)
if final_step:
# We need to wait so the watching client gets the message
await asyncio.sleep(60)
break
timestep += 1
async def mock_runs(images_path, output_path, shots, runs, run_interval, timestep_interval):
shot_names = ['shot%d' % x for x in range(0, shots)]
usernames = [fake.user_name() for x in range(0, runs)]
run_names = ['run%d' % run for run in range(0, runs)]
image_paths = [images_path]*runs
machine_names = [random.choice(machines) for x in range(0, runs)]
index_path = images_path / 'index.json'
# If we are using a real data set then we can use the index file.
if index_path.exists():
usernames = []
shot_names = []
run_names = []
image_paths = []
machine_names = []
with index_path.open('r') as fp:
shots = json.load(fp)
for shot in shots:
usernames.append(shot['username'])
shot_names.append(shot['shot_name'])
run_names.append(shot['run_name'])
image_paths.append( images_path / shot['shot_name'] / shot['run_name'])
machine_names.append(shot['machine_name'])
shots_path = output_path / 'shots'
if not shots_path.exists():
shots_path.mkdir(parents=True, exist_ok=False)
tasks = []
for shot in shot_names:
for path, run, user, machine in zip(image_paths, run_names, usernames, machine_names):
tasks.append(
asyncio.create_task(
mock_run(path, output_path, shot, run,
user, run_interval, timestep_interval, machine)
)
)
await asyncio.gather(*tasks)
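# Sketch of the optional <data-path>/index.json consumed by mock_runs; the
# keys are inferred from the fields read above and are an assumption, not a
# documented schema:
#   [
#     {"shot_name": "shot0", "run_name": "run0", "username": "jdoe",
#      "machine_name": "summit"}
#   ]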
@click.command('mock', help='Mock simulation web upload site')
@click.option('-s', '--shots', default=2, type=int, help='number of shots to simulate')
@click.option('-r', '--runs', default=2, type=int, help='number of runs per shot')
@click.option('-i', '--run-interval', default=30, type=int, help='seconds to create runs in')
@click.option('-t', '--timestep-interval', default=30, type=int, help='timestep delta seconds')
@click.option('-d', '--data-path', required=True,
type=click.Path(exists=True, file_okay=False, dir_okay=True,
resolve_path=True), help='path to images')
def main(shots, runs, run_interval, timestep_interval, data_path):
app = Flask(__name__)
with tempfile.TemporaryDirectory() as static_content_dir:
log.info('static content directory: %s' % static_content_dir)
static_content_dir = Path(static_content_dir)
@app.route('/<path:path>')
def static_proxy(path):
path = static_content_dir / path
return send_from_directory(path.parent, path.name)
threading.Thread(target=app.run).start()
asyncio.run(mock_runs(Path(data_path), Path(static_content_dir), shots,
runs, run_interval, timestep_interval))
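# Hypothetical invocation: the "adash" entry-point name is an assumption based
# on the logger name, so adapt it to however this package is actually installed:
#   adash mock --data-path ./images --shots 2 --runs 2 \
#       --run-interval 30 --timestep-interval 30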
|
walking_simulation.py
|
#!/usr/bin/env python
import concurrent
import ctypes
import cv2
import math
import numpy as np
import os
import pybullet as p
import pybullet_data
import random
import rospkg
import rospy
import tf2_ros
import threading
from cv_bridge import CvBridge
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseWithCovarianceStamped, TransformStamped, Twist
from quadruped_ctrl.srv import QuadrupedCmd, QuadrupedCmdResponse
from pybullet_utils import gazebo_world_parser
from sensor_msgs.msg import Image, Imu, JointState, PointCloud2, PointField
from tf_conversions import transformations
from whole_body_state_msgs.msg import WholeBodyState
from whole_body_state_msgs.msg import JointState as WBJointState
from whole_body_state_msgs.msg import ContactState as WBContactState
class StructPointer(ctypes.Structure):
_fields_ = [("eff", ctypes.c_double * 12)]
class WalkingSimulation(object):
def __init__(self):
self.terrain = "racetrack"
self.camera = True
self.get_last_vel = [0] * 3
self.robot_height = 0.30
self.motor_id_list = [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14]
self.init_new_pos = [0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self.__init_ros()
self.__load_controller()
self.__init_simulator()
add_thread = threading.Thread(target=self.__thread_job)
add_thread.start()
if self.camera:
add_thread_1 = threading.Thread(target=self.__camera_update)
add_thread_1.start()
def __init_ros(self):
self.terrain = rospy.get_param('/simulation/terrain')
self.camera = rospy.get_param('/simulation/camera')
self.lateralFriction = rospy.get_param('/simulation/lateralFriction')
self.spinningFriction = rospy.get_param('/simulation/spinningFriction')
self.freq = rospy.get_param('/simulation/freq')
self.stand_kp = rospy.get_param('/simulation/stand_kp')
self.stand_kd = rospy.get_param('/simulation/stand_kd')
self.joint_kp = rospy.get_param('/simulation/joint_kp')
self.joint_kd = rospy.get_param('/simulation/joint_kd')
rospy.loginfo("lateralFriction = " + str(self.lateralFriction) +
" spinningFriction = " + str(self.spinningFriction))
rospy.loginfo(" freq = " + str(self.freq) + " PID = " +
str([self.stand_kp, self.stand_kd, self.joint_kp, self.joint_kd]))
self.s0 = rospy.Service('gait_type', QuadrupedCmd, self.__callback_gait)
self.s1 = rospy.Service('robot_mode', QuadrupedCmd, self.__callback_mode)
self.s2 = rospy.Subscriber("cmd_vel", Twist, self.__callback_body_vel, buff_size=30)
self.robot_tf = tf2_ros.TransformBroadcaster()
def __load_controller(self):
self.path = rospkg.RosPack().get_path('quadruped_ctrl')
so_file = self.path.replace('src/quadruped_ctrl', 'devel/lib/libquadruped_ctrl.so')
if(not os.path.exists(so_file)):
so_file = self.path.replace('src/quadruped_ctrl', 'build/lib/libquadruped_ctrl.so')
if(not os.path.exists(so_file)):
rospy.logerr("cannot find cpp.so file")
self.cpp_gait_ctrller = ctypes.cdll.LoadLibrary(so_file)
self.cpp_gait_ctrller.torque_calculator.restype = ctypes.POINTER(StructPointer)
rospy.loginfo("find so file = " + so_file)
def __init_simulator(self):
robot_start_pos = [0, 0, self.robot_height]
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath()) # optionally
p.resetSimulation()
p.setTimeStep(1.0/self.freq)
p.setGravity(0, 0, -9.81)
self.reset = p.addUserDebugParameter("reset", 1, 0, 0)
self.low_energy_mode = p.addUserDebugParameter("low_energy_mode", 1, 0, 0)
self.high_performance_mode = p.addUserDebugParameter("high_performance_mode", 1, 0, 0)
p.resetDebugVisualizerCamera(0.2, 45, -30, [1, -1, 1])
heightPerturbationRange = 0.06
numHeightfieldRows = 256
numHeightfieldColumns = 256
if self.terrain == "plane":
planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)
ground_id = p.createMultiBody(0, planeShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "random1":
heightfieldData = [0]*numHeightfieldRows*numHeightfieldColumns
for j in range(int(numHeightfieldColumns/2)):
for i in range(int(numHeightfieldRows/2)):
height = random.uniform(0, heightPerturbationRange)
heightfieldData[2*i+2*j*numHeightfieldRows] = height
heightfieldData[2*i+1+2*j*numHeightfieldRows] = height
heightfieldData[2*i+(2*j+1)*numHeightfieldRows] = height
heightfieldData[2*i+1+(2*j+1)*numHeightfieldRows] = height
terrainShape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
meshScale=[.05, .05, 1],
heightfieldTextureScaling=(numHeightfieldRows-1)/2,
heightfieldData=heightfieldData,
numHeightfieldRows=numHeightfieldRows,
numHeightfieldColumns=numHeightfieldColumns)
ground_id = p.createMultiBody(0, terrainShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "random2":
terrain_shape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
meshScale=[.5, .5, .5],
fileName="heightmaps/ground0.txt",
heightfieldTextureScaling=128)
ground_id = p.createMultiBody(0, terrain_shape)
textureId = p.loadTexture(self.path + "/models/grass.png")
p.changeVisualShape(ground_id, -1, textureUniqueId=textureId)
p.resetBasePositionAndOrientation(ground_id, [1, 0, 0.2], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "stairs":
planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)
ground_id = p.createMultiBody(0, planeShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
# many boxes
colSphereId = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.01])
colSphereId1 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.02])
colSphereId2 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.03])
colSphereId3 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.04])
p.createMultiBody(100, colSphereId, basePosition=[1.0, 1.0, 0.0])
p.changeDynamics(colSphereId, -1, lateralFriction=self.lateralFriction)
p.createMultiBody(100, colSphereId1, basePosition=[1.2, 1.0, 0.0])
p.changeDynamics(colSphereId1, -1, lateralFriction=self.lateralFriction)
p.createMultiBody(100, colSphereId2, basePosition=[1.4, 1.0, 0.0])
p.changeDynamics(colSphereId2, -1, lateralFriction=self.lateralFriction)
p.createMultiBody(100, colSphereId3, basePosition=[1.6, 1.0, 0.0])
p.changeDynamics(colSphereId3, -1, lateralFriction=self.lateralFriction)
p.changeDynamics(ground_id, -1, lateralFriction=self.lateralFriction)
elif self.terrain == "racetrack":
os.chdir(self.path)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
gazebo_world_parser.parseWorld(p, filepath="worlds/racetrack_day.world")
p.configureDebugVisualizer(shadowMapResolution=8192)
p.configureDebugVisualizer(shadowMapWorldSize=25)
# Enable rendering after loading the world
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
# Disable visualization of cameras in pybullet GUI
p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, 0)
p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0)
p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)
# Enable this if you want better performance
p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING, 0)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 1)
# TODO: Get the URDF from robot_description parameter (or URDF file in the repo)
self.boxId = p.loadURDF("mini_cheetah/mini_cheetah.urdf", robot_start_pos, useFixedBase=False)
p.changeDynamics(self.boxId, 3, spinningFriction=self.spinningFriction)
p.changeDynamics(self.boxId, 7, spinningFriction=self.spinningFriction)
p.changeDynamics(self.boxId, 11, spinningFriction=self.spinningFriction)
p.changeDynamics(self.boxId, 15, spinningFriction=self.spinningFriction)
self.__reset_robot()
def __reset_robot(self):
if self.terrain == "racetrack":
robot_z = 0.4
else:
robot_z = self.robot_height
p.resetBasePositionAndOrientation(
self.boxId, [0, 0, robot_z], [0, 0, 0, 1])
p.resetBaseVelocity(self.boxId, [0, 0, 0], [0, 0, 0])
for j in range(12):
p.resetJointState(
self.boxId, self.motor_id_list[j], self.init_new_pos[j], self.init_new_pos[j+12])
self.cpp_gait_ctrller.init_controller(
self.__convert_type(self.freq),
self.__convert_type([self.stand_kp, self.stand_kd, self.joint_kp, self.joint_kd]))
for _ in range(10):
p.stepSimulation()
imu_data, leg_data, _, _ = self.__get_data_from_sim()
self.cpp_gait_ctrller.pre_work(self.__convert_type(
imu_data), self.__convert_type(leg_data["state"]))
p.setJointMotorControlArray(bodyUniqueId=self.boxId,
jointIndices=self.motor_id_list,
controlMode=p.VELOCITY_CONTROL,
forces=[0]*len(self.motor_id_list))
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(1))
def run(self):
rate = rospy.Rate(self.freq) # Hz
reset_flag = p.readUserDebugParameter(self.reset)
low_energy_flag = p.readUserDebugParameter(self.low_energy_mode)
high_performance_flag = p.readUserDebugParameter(self.high_performance_mode)
while not rospy.is_shutdown():
# check reset button state
if(reset_flag < p.readUserDebugParameter(self.reset)):
reset_flag = p.readUserDebugParameter(self.reset)
rospy.logwarn("reset the robot")
self.__reset_robot()
if(low_energy_flag < p.readUserDebugParameter(self.low_energy_mode)):
low_energy_flag = p.readUserDebugParameter(self.low_energy_mode)
rospy.loginfo("set robot to low energy mode")
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(1))
if(high_performance_flag < p.readUserDebugParameter(self.high_performance_mode)):
high_performance_flag = p.readUserDebugParameter(self.high_performance_mode)
rospy.loginfo("set robot to high performance mode")
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(0))
self.__simulation_step()
rate.sleep()
def __simulation_step(self):
# get data from simulator
imu_data, leg_data, base_pos, contact_points = self.__get_data_from_sim()
# pub msg
self.__pub_nav_msg(base_pos, imu_data)
self.__pub_ground_truth_pose(base_pos, imu_data)
self.__pub_imu_msg(imu_data)
self.__pub_joint_states(leg_data)
self.__pub_whole_body_state(imu_data, leg_data, base_pos, contact_points)
# call cpp function to calculate mpc tau
tau = self.cpp_gait_ctrller.torque_calculator(self.__convert_type(
imu_data), self.__convert_type(leg_data["state"]))
# set tau to simulator
p.setJointMotorControlArray(bodyUniqueId=self.boxId,
jointIndices=self.motor_id_list,
controlMode=p.TORQUE_CONTROL,
forces=tau.contents.eff)
p.stepSimulation()
def __get_ros_depth_image_msg(self, depth):
depth_raw_image = self.far * self.near / (self.far - (self.far - self.near) * depth)
depth_raw_image = (depth_raw_image * 1000).astype(np.uint16)
msg = CvBridge().cv2_to_imgmsg(depth_raw_image)
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = "body"
return msg
def __get_ros_rgb_image_msg(self, rgba):
image = cv2.cvtColor(np.uint8(rgba), code=cv2.COLOR_RGBA2RGB)
msg = CvBridge().cv2_to_imgmsg(image)
msg.header.stamp = rospy.Time().now()
msg.header.frame_id = "cam"
msg.encoding = "rgb8"
return msg
def calIntrinsicMatrix(self):
f = math.sqrt(self.width * self.width / 4.0 + self.height * self.height / 4.0) / 2.0 / \
math.tan(self.fov / 2.0 / 180.0 * math.pi)
return (f, 0.0, self.width / 2.0 - 0.5, 0.0, f, self.height / 2.0 - 0.5, 0.0, 0.0, 1.0)
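    # Added note: the value computed above is
    #   f = sqrt((width / 2)^2 + (height / 2)^2) / 2 / tan(fov / 2)
    # with fov given in degrees and converted to radians, and the returned
    # tuple is the row-major 3x3 intrinsic matrix
    #   [[f, 0, cx], [0, f, cy], [0, 0, 1]]
    # with cx = width / 2 - 0.5 and cy = height / 2 - 0.5.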
def __generate_scene_pointcloud(self, depth, rgba):
        '''Generate a point cloud from the simulator's depth buffer and RGBA image.
        Args:
            depth (np.array): Normalized depth buffer returned by p.getCameraImage.
            rgba (np.array): RGBA color image returned by p.getCameraImage.
        Returns:
            np.array(float), np.array(int): points (camera frame) and colors
        '''
intrinsics = np.array(self.calIntrinsicMatrix()).reshape((3, 3))
depth_scale = 1.0
depths = self.far * self.near / (self.far - (self.far - self.near) * depth)
colors = cv2.cvtColor(np.uint8(rgba), code=cv2.COLOR_RGBA2RGB)
fx, fy = intrinsics[0, 0], intrinsics[1, 1]
cx, cy = intrinsics[0, 2], intrinsics[1, 2]
xmap, ymap = np.arange(colors.shape[1]), np.arange(colors.shape[0])
xmap, ymap = np.meshgrid(xmap, ymap)
points_z = depths / depth_scale
points_x = (xmap - cx) / fx * points_z
points_y = (ymap - cy) / fy * points_z
mask = (points_z > 0)
points = np.stack([points_x, points_y, points_z], axis=-1)
points = points[mask]
colors = colors[mask]
return points, colors
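    # For reference (added comment): the vectorized back-projection above is
    # the standard pinhole model; for a pixel (u, v) with metric depth Z,
    #   X = (u - cx) * Z / fx
    #   Y = (v - cy) * Z / fy
    # so every pixel with positive depth contributes one point in the camera
    # frame, paired with its RGB value.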
def __get_ros_pointcloud_msg(self, depth, rgba):
points, colors = self.__generate_scene_pointcloud(depth, rgba)
points = points.astype(np.float32)
msg = PointCloud2()
msg.header.stamp = rospy.Time().now()
C = np.zeros((colors[:, 0].size, 4), dtype=np.uint8)
C[:, 0] = colors[:, 2].astype(np.uint8)
C[:, 1] = colors[:, 1].astype(np.uint8)
C[:, 2] = colors[:, 0].astype(np.uint8)
C = C.view("uint32")
C = C.view("float32")
pointsColor = np.zeros((points.shape[0], 1), \
dtype={
"names": ( "x", "y", "z", "rgba" ),
"formats": ( "f4", "f4", "f4", "f4" )} )
points = points.astype(np.float32)
pointsColor["x"] = points[:, 0].reshape((-1, 1))
pointsColor["y"] = points[:, 1].reshape((-1, 1))
pointsColor["z"] = points[:, 2].reshape((-1, 1))
pointsColor["rgba"] = C
msg.header.frame_id = "cam"
if len(points.shape) == 3:
msg.height = points.shape[1]
msg.width = points.shape[0]
else:
msg.height = 1
msg.width = len(points)
msg.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('rgb', 12, PointField.FLOAT32, 1)]
msg.is_bigendian = False
msg.point_step = 16
msg.row_step = msg.point_step * points.shape[0]
msg.is_dense = int(np.isfinite(points).all())
msg.data = pointsColor.tostring()
return msg
# https://github.com/OCRTOC/OCRTOC_software_package/blob/master/pybullet_simulator/scripts/pybullet_env.py
def __camera_update(self):
rate = rospy.Rate(20)
# Projection matrix parameters
self.near = 0.01
self.far = 3.0
self.fov = 60
step_index = 4
self.width = int(320 / step_index)
self.height = int(240 / step_index)
self.aspect = float(self.width) / float(self.height)
# Init ROS publishers
self.pointcloud_publisher = rospy.Publisher("/cam0/depth/points", PointCloud2, queue_size=1)
self.image_publisher = rospy.Publisher("/cam0/image_raw", Image, queue_size=1)
self.depth_publisher = rospy.Publisher("/cam0/image_depth", Image, queue_size=1)
rospy.loginfo("Starting camera thread")
T1 = np.mat([[0, -1.0/2.0, np.sqrt(3.0)/2.0, 0.25], [-1, 0, 0, 0],
[0, -np.sqrt(3.0)/2.0, -1.0/2.0, 0], [0, 0, 0, 1]])
cameraEyePosition = [0.3, 0, 0.26436384367425125]
cameraTargetPosition = [1.0, 0, 0]
cameraUpVector = [0, 0, 1]
while not rospy.is_shutdown():
cubePos, cubeOrn = p.getBasePositionAndOrientation(self.boxId)
get_matrix = p.getMatrixFromQuaternion(cubeOrn)
T2 = np.mat([[get_matrix[0], get_matrix[1], get_matrix[2], cubePos[0]],
[get_matrix[3], get_matrix[4], get_matrix[5], cubePos[1]],
[get_matrix[6], get_matrix[7], get_matrix[8], cubePos[2]],
[0, 0, 0, 1]])
T3 = np.array(T2*T1)
cameraEyePosition = T3[0:3, 3].tolist()
cameraTargetPosition = (np.mat(T3) * np.array([[0], [0], [1], [1]]))[0:3]
            # Get the quaternion from the numpy homogeneous matrix
cameraQuat = transformations.quaternion_from_matrix(T3)
self.robot_tf.sendTransform(self.__fill_tf_message("world", "body", cubePos, cubeOrn))
self.robot_tf.sendTransform(
self.__fill_tf_message("world", "cam", cameraEyePosition, cameraQuat))
self.robot_tf.sendTransform(
self.__fill_tf_message("world", "tar", cameraTargetPosition, cubeOrn))
viewMatrix = p.computeViewMatrix(
cameraEyePosition, cameraTargetPosition, cameraUpVector)
projectionMatrix = p.computeProjectionMatrixFOV(
self.fov, self.aspect, self.near, self.far)
_, _, rgba, depth, _ = p.getCameraImage(
self.width,
self.height,
viewMatrix=viewMatrix,
projectionMatrix=projectionMatrix,
shadow=1,
lightDirection=[1, 1, 1],
renderer=p.ER_BULLET_HARDWARE_OPENGL,
flags=p.ER_NO_SEGMENTATION_MASK)
with concurrent.futures.ThreadPoolExecutor() as executor:
f1 = executor.submit(self.__get_ros_depth_image_msg, depth)
f2 = executor.submit(self.__get_ros_rgb_image_msg, rgba)
f3 = executor.submit(self.__get_ros_pointcloud_msg, depth, rgba)
r1 = f1.result()
r2 = f2.result()
r3 = f3.result()
if(self.depth_publisher.get_num_connections() > 0):
self.depth_publisher.publish(r1)
if(self.image_publisher.get_num_connections() > 0):
self.image_publisher.publish(r2)
if(self.pointcloud_publisher.get_num_connections() > 0):
self.pointcloud_publisher.publish(r3)
rate.sleep()
def __convert_type(self, input):
ctypes_map = {
int: ctypes.c_int,
float: ctypes.c_double,
str: ctypes.c_char_p,
}
input_type = type(input)
if input_type is list:
length = len(input)
if length == 0:
rospy.logerr("convert type failed...input is " + input)
return 0
else:
arr = (ctypes_map[type(input[0])] * length)()
for i in range(length):
arr[i] = bytes(
input[i], encoding="utf-8") if (type(input[0]) is str) else input[i]
return arr
else:
if input_type in ctypes_map:
return ctypes_map[input_type](bytes(input, encoding="utf-8") if type(input) is str else input)
else:
rospy.logerr("convert type failed...input is "+input)
return 0
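    # Illustrative behaviour of __convert_type, inferred from the mapping above
    # rather than from any documented interface:
    #   self.__convert_type(500.0)       -> ctypes.c_double(500.0)
    #   self.__convert_type([0.1, 0.2])  -> (ctypes.c_double * 2) array
    #   self.__convert_type("walk")      -> ctypes.c_char_p(b"walk")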
def __thread_job(self):
rospy.spin()
def __callback_gait(self, req):
self.cpp_gait_ctrller.set_gait_type(self.__convert_type(req.cmd))
return QuadrupedCmdResponse(0, "get the gait")
def __callback_mode(self, req):
self.cpp_gait_ctrller.set_robot_mode(self.__convert_type(req.cmd))
return QuadrupedCmdResponse(0, "get the mode")
def __callback_body_vel(self, msg):
vel = [msg.linear.x, msg.linear.y, msg.angular.x]
self.cpp_gait_ctrller.set_robot_vel(self.__convert_type(vel))
def __fill_tf_message(self, parent_frame, child_frame, translation, rotation):
t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = parent_frame
t.child_frame_id = child_frame
t.transform.translation.x = translation[0]
t.transform.translation.y = translation[1]
t.transform.translation.z = translation[2]
t.transform.rotation.x = rotation[0]
t.transform.rotation.y = rotation[1]
t.transform.rotation.z = rotation[2]
t.transform.rotation.w = rotation[3]
return t
def __pub_nav_msg(self, base_pos, imu_data):
pub_odom = rospy.Publisher("/robot_odom", Odometry, queue_size=30)
odom = Odometry()
odom.header.stamp = rospy.Time.now()
odom.header.frame_id = "world"
odom.child_frame_id = "body"
odom.pose.pose.position.x = base_pos[0]
odom.pose.pose.position.y = base_pos[1]
odom.pose.pose.position.z = base_pos[2]
odom.pose.pose.orientation.x = imu_data[3]
odom.pose.pose.orientation.y = imu_data[4]
odom.pose.pose.orientation.z = imu_data[5]
odom.pose.pose.orientation.w = imu_data[6]
pub_odom.publish(odom)
# Publish odom Tf
t = self.__fill_tf_message(
odom.header.frame_id, odom.child_frame_id, base_pos[0:3], imu_data[3:7])
self.robot_tf.sendTransform(t)
def __pub_ground_truth_pose(self, base_pos, imu_data):
pub_gt_pose = rospy.Publisher("/gt_pose", PoseWithCovarianceStamped, queue_size=1)
gt_pose = PoseWithCovarianceStamped()
gt_pose.header.stamp = rospy.Time.now()
gt_pose.header.frame_id = "body"
gt_pose.pose.pose.position.x = base_pos[0]
gt_pose.pose.pose.position.y = base_pos[1]
gt_pose.pose.pose.position.z = base_pos[2]
gt_pose.pose.pose.orientation.x = imu_data[3]
gt_pose.pose.pose.orientation.y = imu_data[4]
gt_pose.pose.pose.orientation.z = imu_data[5]
gt_pose.pose.pose.orientation.w = imu_data[6]
pub_gt_pose.publish(gt_pose)
def __pub_imu_msg(self, imu_data):
pub_imu = rospy.Publisher("/imu0", Imu, queue_size=30)
imu_msg = Imu()
imu_msg.linear_acceleration.x = imu_data[0]
imu_msg.linear_acceleration.y = imu_data[1]
imu_msg.linear_acceleration.z = imu_data[2]
imu_msg.angular_velocity.x = imu_data[7]
imu_msg.angular_velocity.y = imu_data[8]
imu_msg.angular_velocity.z = imu_data[9]
imu_msg.orientation.x = imu_data[3]
imu_msg.orientation.y = imu_data[4]
imu_msg.orientation.z = imu_data[5]
imu_msg.orientation.w = imu_data[6]
imu_msg.header.stamp = rospy.Time.now()
imu_msg.header.frame_id = "body"
pub_imu.publish(imu_msg)
def __pub_joint_states(self, joint_states):
pub_js = rospy.Publisher("joint_states", JointState, queue_size=30)
js_msg = JointState()
js_msg.name = []
js_msg.position = []
js_msg.velocity = []
for idx, name in enumerate(joint_states["name"]):
js_msg.name.append(name.decode('utf-8'))
js_msg.position.append(joint_states["state"][idx])
js_msg.velocity.append(joint_states["state"][12+idx])
js_msg.header.stamp = rospy.Time.now()
js_msg.header.frame_id = "body"
pub_js.publish(js_msg)
def __pub_whole_body_state(self, imu_data, leg_data, base_pos, contact_points):
wbs_pub = rospy.Publisher("wb_state", WholeBodyState, queue_size=10)
wbs = WholeBodyState()
wbs.header.stamp = rospy.Time.now()
wbs.header.frame_id = "world"
wbs.time = wbs.header.stamp.secs
# This represents the base state (CoM motion, angular motion and centroidal momenta)
wbs.centroidal.com_position.x = base_pos[0]
wbs.centroidal.com_position.y = base_pos[1]
wbs.centroidal.com_position.z = base_pos[2]
wbs.centroidal.base_orientation.x = imu_data[3]
wbs.centroidal.base_orientation.y = imu_data[4]
wbs.centroidal.base_orientation.z = imu_data[5]
wbs.centroidal.base_orientation.w = imu_data[6]
wbs.centroidal.base_angular_velocity.x = imu_data[7]
wbs.centroidal.base_angular_velocity.y = imu_data[8]
wbs.centroidal.base_angular_velocity.z = imu_data[9]
# This represents the joint state (position, velocity, acceleration and effort)
wbs.joints = []
for idx, name in enumerate(leg_data["name"]):
js_msg = WBJointState()
js_msg.name = name.decode('utf-8')
js_msg.position = leg_data["state"][idx]
js_msg.velocity = leg_data["state"][12+idx]
wbs.joints.append(js_msg)
# This represents the end-effector state (cartesian position and contact forces)
wbs.contacts = []
for contact_point in contact_points:
contact_msg = WBContactState()
contact_msg.name = "body"
contact_msg.type = WBContactState.ACTIVE
contact_msg.pose.position.x = contact_point[5][0]
contact_msg.pose.position.y = contact_point[5][1]
contact_msg.pose.position.z = contact_point[5][2]
contact_msg.wrench.force.z = contact_point[9]
contact_msg.surface_normal.x = contact_point[7][0]
contact_msg.surface_normal.y = contact_point[7][1]
contact_msg.surface_normal.z = contact_point[7][2]
contact_msg.friction_coefficient = self.lateralFriction
wbs.contacts.append(contact_msg)
wbs_pub.publish(wbs)
def __get_motor_joint_states(self, robot):
joint_number_range = range(p.getNumJoints(robot))
joint_states = p.getJointStates(robot, joint_number_range)
joint_infos = [p.getJointInfo(robot, i) for i in joint_number_range]
joint_states, joint_name = \
zip(*[(j, i[1]) for j, i in zip(joint_states, joint_infos) if i[2] != p.JOINT_FIXED])
joint_positions = [state[0] for state in joint_states]
joint_velocities = [state[1] for state in joint_states]
joint_torques = [state[3] for state in joint_states]
return joint_positions, joint_velocities, joint_torques, joint_name
def __get_data_from_sim(self):
get_matrix = []
get_velocity = []
get_invert = []
imu_data = [0] * 10
leg_data = {}
leg_data["state"] = [0] * 24
leg_data["name"] = [""] * 12
base_pose = p.getBasePositionAndOrientation(self.boxId)
get_velocity = p.getBaseVelocity(self.boxId)
get_invert = p.invertTransform(base_pose[0], base_pose[1])
get_matrix = p.getMatrixFromQuaternion(get_invert[1])
# IMU data
imu_data[3] = base_pose[1][0]
imu_data[4] = base_pose[1][1]
imu_data[5] = base_pose[1][2]
imu_data[6] = base_pose[1][3]
imu_data[7] = get_matrix[0] * get_velocity[1][0] + get_matrix[1] * \
get_velocity[1][1] + get_matrix[2] * get_velocity[1][2]
imu_data[8] = get_matrix[3] * get_velocity[1][0] + get_matrix[4] * \
get_velocity[1][1] + get_matrix[5] * get_velocity[1][2]
imu_data[9] = get_matrix[6] * get_velocity[1][0] + get_matrix[7] * \
get_velocity[1][1] + get_matrix[8] * get_velocity[1][2]
# calculate the acceleration of the robot
linear_X = (get_velocity[0][0] - self.get_last_vel[0]) * self.freq
linear_Y = (get_velocity[0][1] - self.get_last_vel[1]) * self.freq
linear_Z = 9.8 + (get_velocity[0][2] - self.get_last_vel[2]) * self.freq
imu_data[0] = get_matrix[0] * linear_X + \
get_matrix[1] * linear_Y + get_matrix[2] * linear_Z
imu_data[1] = get_matrix[3] * linear_X + \
get_matrix[4] * linear_Y + get_matrix[5] * linear_Z
imu_data[2] = get_matrix[6] * linear_X + \
get_matrix[7] * linear_Y + get_matrix[8] * linear_Z
# joint data
joint_positions, joint_velocities, _, joint_names = \
self.__get_motor_joint_states(self.boxId)
leg_data["state"][0:12] = joint_positions
leg_data["state"][12:24] = joint_velocities
leg_data["name"] = joint_names
# CoM velocity
self.get_last_vel = [get_velocity[0][0], get_velocity[0][1], get_velocity[0][2]]
# Contacts
contact_points = p.getContactPoints(self.boxId)
return imu_data, leg_data, base_pose[0], contact_points
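    # Layout note, inferred from the assignments above (an observation, not a
    # documented interface): imu_data packs
    #   [0:3]  linear acceleration in the body frame,
    #   [3:7]  base orientation quaternion (x, y, z, w),
    #   [7:10] angular velocity in the body frame,
    # while leg_data["state"] holds 12 joint positions followed by 12 joint
    # velocities.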
if __name__ == '__main__':
rospy.init_node('quadruped_simulator', anonymous=True)
walking_simulation = WalkingSimulation()
walking_simulation.run()
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import multiprocessing
import os
import pickle # type: ignore
import re
import signal
import subprocess
import tempfile
import unittest
import warnings
from datetime import timedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from tempfile import NamedTemporaryFile
from time import sleep
from typing import Optional
from unittest import mock
import sqlalchemy
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from pendulum import utcnow
from airflow import DAG, configuration, exceptions, jobs, models, settings, utils
from airflow.bin import cli
from airflow.configuration import AirflowConfigException, conf, run_command
from airflow.exceptions import AirflowException
from airflow.executors import SequentialExecutor
from airflow.hooks import hdfs_hook
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.models import (
BaseOperator, Connection, DagBag, DagModel, DagRun, Pool, TaskFail, TaskInstance, Variable,
)
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
class TestCore(unittest.TestCase):
TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_no_previous_runs'
TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous'
TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only'
TEST_SCHEDULE_ONCE_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_once'
TEST_SCHEDULE_RELATIVEDELTA_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_relativedelta'
TEST_SCHEDULE_START_END_DATES_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_start_end_dates'
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
if os.environ.get('KUBERNETES_VERSION') is not None:
return
dag_ids_to_clean = [
TEST_DAG_ID,
self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID,
self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
self.TEST_SCHEDULE_ONCE_DAG_ID,
self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
]
session = Session()
session.query(DagRun).filter(
DagRun.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
schedule_interval=delta)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(self.TEST_SCHEDULE_ONCE_DAG_ID)
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
start_date=start_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(_, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
'{\n "foo": "bar"\n}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_raise_key_error(self):
with self.assertRaises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_delete(self):
key = "tested_var_delete"
value = "to be deleted"
# No-op if the variable doesn't exist
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
# Set the variable
Variable.set(key, value)
self.assertEqual(value, Variable.get(key))
# Delete the variable
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = conf.get('core', 'FERNET_KEY')
with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
FALLBACK_FERNET_KEY = conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
with conf_vars({('core', 'fernet_key'): None}):
with self.assertRaises(AirflowConfigException) as cm:
conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)), '\u1000foo')
self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object): # pylint: disable=unused-argument
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date, utc_now)
def test_trigger_dagrun_with_str_execution_date(self):
utc_now_str = timezone.utcnow().isoformat()
self.assertIsInstance(utc_now_str, (str,))
run_id = 'trig__' + utc_now_str
def payload_generator(context, object): # pylint: disable=unused-argument
object.run_id = run_id
return object
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now_str,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date.isoformat(), utc_now_str)
def test_trigger_dagrun_with_templated_execution_date(self):
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
execution_date='{{ execution_date }}',
dag=self.dag)
self.assertTrue(isinstance(task.execution_date, str))
self.assertEqual(task.execution_date, '{{ execution_date }}')
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.render_templates()
self.assertEqual(timezone.parse(task.execution_date), DEFAULT_DATE)
def test_externally_triggered_dagrun(self):
TI = TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=DagRun.id_for_date(EXECUTION_DATE),
execution_date=EXECUTION_DATE,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
ti = TI(task=task, execution_date=EXECUTION_DATE)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], EXECUTION_DS)
self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)
self.assertEqual(context['prev_ds'], EXECUTION_DS)
self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class TestCli(unittest.TestCase):
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._cleanup()
def setUp(self):
super().setUp()
from airflow.www import app as application
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
for email in [self.TEST_USER1_EMAIL, self.TEST_USER2_EMAIL]:
test_user = self.appbuilder.sm.find_user(email=email)
if test_user:
self.appbuilder.sm.del_register_user(test_user)
for role_name in ['FakeTeamA', 'FakeTeamB']:
if self.appbuilder.sm.find_role(role_name):
self.appbuilder.sm.delete_role(role_name)
super().tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(Pool).delete()
session.query(Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['dags', 'list', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator', ]))
args = self.parser.parse_args(['dags', 'list_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users_create(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
args = self.parser.parse_args([
'users', 'delete', '--username', 'test3',
])
cli.users_delete(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', 'create', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users_create(args)
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.users_list(self.parser.parse_args(['users', 'list']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_import_users(self):
def assertUserInRoles(email, roles):
for role in roles:
self.assertTrue(self._does_user_belong_to_role(email, role))
def assertUserNotInRoles(email, roles):
for role in roles:
self.assertFalse(self._does_user_belong_to_role(email, role))
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Admin", "Op"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Public"]
}
]
self._import_users_from_file(users)
assertUserInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]
}
]
self._import_users_from_file(users)
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER1_EMAIL, ['Public'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Admin'])
def test_cli_export_users(self):
user1 = {"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]}
user2 = {"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]}
self._import_users_from_file([user1, user2])
users_filename = self._export_users_to_file()
with open(users_filename, mode='r') as file:
retrieved_users = json.loads(file.read())
os.remove(users_filename)
# ensure that an export can be imported
self._import_users_from_file(retrieved_users)
def find_by_username(username):
matches = [u for u in retrieved_users
if u['username'] == username]
if not matches:
self.fail("Couldn't find user with username {}".format(username))
else:
                matches[0].pop('id')  # this key is not required for import
return matches[0]
self.assertEqual(find_by_username('imported_user1'), user1)
self.assertEqual(find_by_username('imported_user2'), user2)
def _import_users_from_file(self, user_list):
json_file_content = json.dumps(user_list)
f = NamedTemporaryFile(delete=False)
try:
f.write(json_file_content.encode())
f.flush()
args = self.parser.parse_args([
'users', 'import', f.name
])
cli.users_import(args)
finally:
os.remove(f.name)
def _export_users_to_file(self):
f = NamedTemporaryFile(delete=False)
args = self.parser.parse_args([
'users', 'export', f.name
])
cli.users_export(args)
return f.name
def _does_user_belong_to_role(self, email, rolename):
user = self.appbuilder.sm.find_user(email=email)
role = self.appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
def test_cli_add_user_role(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should not yet be a member of role 'Op'"
)
args = self.parser.parse_args([
'users', 'add_role', '--username', 'test4', '--role', 'Op'
])
cli.users_manage_role(args, remove=False)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should have been added to role 'Op'"
)
def test_cli_remove_user_role(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been created with role 'Viewer'"
)
args = self.parser.parse_args([
'users', 'remove_role', '--username', 'test4', '--role', 'Viewer'
])
cli.users_manage_role(args, remove=True)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been removed from role 'Viewer'"
)
@mock.patch("airflow.bin.cli.DagBag")
def test_cli_sync_perm(self, dagbag_mock):
self.expect_dagbag_contains([
DAG('has_access_control',
access_control={
'Public': {'can_dag_read'}
}),
DAG('no_access_control')
], dagbag_mock)
self.appbuilder.sm = mock.Mock()
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
assert self.appbuilder.sm.sync_roles.call_count == 1
self.assertEqual(2,
len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'has_access_control',
{'Public': {'can_dag_read'}}
)
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'no_access_control',
None,
)
def expect_dagbag_contains(self, dags, dagbag_mock):
dagbag = mock.Mock()
dagbag.dags = {dag.dag_id: dag for dag in dags}
dagbag_mock.return_value = dagbag
def test_cli_create_roles(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', 'create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles_create(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_create_roles_is_reentrant(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', 'create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles_create(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_list_roles(self):
self.appbuilder.sm.add_role('FakeTeamA')
self.appbuilder.sm.add_role('FakeTeamB')
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.roles_list(self.parser.parse_args(['roles', 'list']))
stdout = mock_stdout.getvalue()
self.assertIn('FakeTeamA', stdout)
self.assertIn('FakeTeamB', stdout)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['tasks', 'list', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'tasks', 'list', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
def test_cli_list_jobs(self):
args = self.parser.parse_args(['dags', 'list_jobs'])
cli.list_jobs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(['dags', 'list_jobs', '--dag_id',
'example_bash_operator',
'--state', 'success',
'--limit', '100'])
cli.list_jobs(args)
@mock.patch("airflow.bin.cli.db.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['db', 'init']))
initdb_mock.assert_called_once_with()
@mock.patch("airflow.bin.cli.db.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['db', 'reset', '--yes']))
resetdb_mock.assert_called_once_with()
def test_cli_connections_list(self):
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_list(self.parser.parse_args(['connections', 'list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['hive_cli_default', 'hive_cli'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', 'list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new1',
'--conn_uri=%s' % uri]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new2',
'--conn_uri=%s' % uri]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_uri
with self.assertRaises(SystemExit) as exc:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new']))
self.assertEqual(
exc.exception.code,
"The following args are required to add a connection: ['conn_uri or conn_type']"
)
        # Prepare to verify the connections that were added
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Verify the stored connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new1']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new2']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new3']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new4']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new5']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existing connection
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'tasks', 'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'tasks', 'state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dags', 'state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'dags', 'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'dags', 'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator', '--yes'])
cli.clear(args)
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator', '--yes', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator.section-1', '--yes'])
cli.clear(args)
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator.section-1', '--yes',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator',
'--yes']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'subdag', '-dx', '--yes']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'foobar', '-dx', '--yes']))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'dags', 'delete',
'does_not_exist_dag',
'--yes'])
)
def test_delete_dag_existing_file(self):
        # Check that the DAG record is deleted even though the file
        # containing it still exists on disk
DM = DagModel
key = "my_dag_id"
session = settings.Session()
with tempfile.NamedTemporaryFile() as f:
session.add(DM(dag_id=key, fileloc=f.name))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
def test_pool_create(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
self.assertEqual(self.session.query(Pool).count(), 1)
def test_pool_get(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
try:
cli.pool_get(self.parser.parse_args(['pools', 'get', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
cli.pool_delete(self.parser.parse_args(['pools', 'delete', 'foo']))
self.assertEqual(self.session.query(Pool).count(), 0)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as file:
json.dump(pool_config_input, file)
# Import json
try:
cli.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool import pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool_export(self.parser.parse_args(['pools', 'export', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool export pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as file:
pool_config_output = json.load(file)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'foo', '{"foo":"bar"}']))
cli.variables_get(self.parser.parse_args([
'variables', 'get', 'foo']))
cli.variables_get(self.parser.parse_args([
'variables', 'get', 'baz', '-d', 'bar']))
cli.variables_list(self.parser.parse_args([
'variables', 'list']))
cli.variables_delete(self.parser.parse_args([
'variables', 'delete', 'bar']))
cli.variables_import(self.parser.parse_args([
'variables', 'import', DEV_NULL]))
cli.variables_export(self.parser.parse_args([
'variables', 'export', DEV_NULL]))
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'bar', 'original']))
# First export
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'bar', 'updated']))
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'foo', '{"foo":"oops"}']))
cli.variables_delete(self.parser.parse_args([
'variables', 'delete', 'foo']))
# First import
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables1.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Second export
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables2.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Set a dict
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'dict', '{"foo": "oops"}']))
# Set a list
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'list', '["oops"]']))
# Set str
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'str', 'hello string']))
# Set int
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'int', '42']))
# Set float
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'float', '42.0']))
# Set true
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'true', 'true']))
# Set false
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'false', 'false']))
# Set none
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'null', 'null']))
# Export and then import
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables3.json']))
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables3.json']))
# Assert value
self.assertEqual({'foo': 'oops'}, models.Variable.get('dict', deserialize_json=True))
self.assertEqual(['oops'], models.Variable.get('list', deserialize_json=True))
self.assertEqual('hello string', models.Variable.get('str')) # cannot json.loads(str)
self.assertEqual(42, models.Variable.get('int', deserialize_json=True))
self.assertEqual(42.0, models.Variable.get('float', deserialize_json=True))
self.assertEqual(True, models.Variable.get('true', deserialize_json=True))
self.assertEqual(False, models.Variable.get('false', deserialize_json=True))
self.assertEqual(None, models.Variable.get('null', deserialize_json=True))
os.remove('variables1.json')
os.remove('variables2.json')
os.remove('variables3.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class FakeWebHDFSHook:
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient:
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
        The fake snakebite client.
        :param path: the list of paths to test
        :param include_toplevel: whether to include the toplevel directory info
        :return: a list of path entries for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook:
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class TestConnection(unittest.TestCase):
def setUp(self):
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
class TestWebHDFSHook(unittest.TestCase):
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None # type: Optional[hdfs_hook.HDFSHook]
snakebite = None # type: None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class TestHDFSHook(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class TestEmail(unittest.TestCase):
def setUp(self):
conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_once_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
with conf_vars({('email', 'email_backend'): 'tests.core.send_email_test'}):
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_once_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class TestEmailSmtp(unittest.TestCase):
def setUp(self):
conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = 'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual('attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_once_with(
conf.get('smtp', 'SMTP_USER'),
conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_once_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({('smtp', 'smtp_ssl'): 'True'}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({
('smtp', 'smtp_user'): None,
('smtp', 'smtp_password'): None,
}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
        self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
scylla_node.py
|
# ccm node
from __future__ import with_statement
import datetime
import errno
import os
import signal
import shutil
import socket
import stat
import subprocess
import time
import threading
import psutil
import yaml
import glob
from six import print_
from six.moves import xrange
from ccmlib import common
from ccmlib.node import Node
from ccmlib.node import NodeError
def wait_for(func, timeout, first=0.0, step=1.0, text=None):
"""
Wait until func() evaluates to True.
If func() evaluates to True before timeout expires, return the
value of func(). Otherwise return None.
:param func: Function that will be evaluated.
:param timeout: Timeout in seconds
:param first: Time to sleep before first attempt
:param step: Time to sleep between attempts in seconds
:param text: Text to print while waiting, for debug purposes
"""
start_time = time.time()
end_time = time.time() + timeout
time.sleep(first)
while time.time() < end_time:
if text:
print_("%s (%f secs)" % (text, (time.time() - start_time)))
output = func()
if output:
return output
time.sleep(step)
return None
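# Illustrative usage sketch (an assumption for clarity, not part of the original
# module): wait_for polls an arbitrary callable until it returns a truthy value or
# the timeout elapses. The marker-file helper below is hypothetical.
#
#     def _marker_pid(path='/tmp/example.pid'):
#         try:
#             with open(path) as f:
#                 return int(f.read())
#         except (IOError, ValueError):
#             return None
#
#     pid = wait_for(_marker_pid, timeout=30, step=0.5, text='waiting for pid file')
#     if pid is None:
#         raise RuntimeError('timed out waiting for pid file')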
class ScyllaNode(Node):
"""
    Provides interactions with a Scylla node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface,
storage_interface, jmx_port, remote_debug_port, initial_token,
save=True, binary_interface=None, scylla_manager=None):
super(ScyllaNode, self).__init__(name, cluster, auto_bootstrap,
thrift_interface, storage_interface,
jmx_port, remote_debug_port,
initial_token, save, binary_interface)
self.__global_log_level = 'info'
self.__classes_log_level = {}
self.get_cassandra_version()
self._process_jmx = None
self._process_jmx_waiter = None
self._process_scylla = None
self._process_scylla_waiter = None
self._process_agent = None
self._process_agent_waiter = None
self._smp = 1
self._smp_set_during_test = False
self._mem_mb_per_cpu = 512
self._mem_set_during_test = False
self.__conf_updated = False
self.scylla_manager = scylla_manager
self.jmx_pid = None
self.agent_pid = None
def set_smp(self, smp):
self._smp = smp
self._smp_set_during_test = True
def set_mem_mb_per_cpu(self, mem):
self._mem_mb_per_cpu = mem
self._mem_set_during_test = True
def get_install_cassandra_root(self):
return self.get_tool_java_dir()
def get_node_cassandra_root(self):
return os.path.join(self.get_path())
def get_conf_dir(self):
"""
Returns the path to the directory where Cassandra config are located
"""
return os.path.join(self.get_path(), 'conf')
def get_tool(self, toolname):
return common.join_bin(self.get_tool_java_dir(), 'bin', toolname)
def get_tool_args(self, toolname):
raise NotImplementedError('ScyllaNode.get_tool_args')
def get_env(self):
update_conf = not self.__conf_updated
if update_conf:
self.__conf_updated = True
return common.make_cassandra_env(self.get_install_cassandra_root(),
self.get_node_cassandra_root(), update_conf=update_conf)
def get_cassandra_version(self):
# TODO: Handle versioning
return '2.2'
def set_log_level(self, new_level, class_name=None):
        known_level = {'TRACE': 'trace', 'DEBUG': 'debug', 'INFO': 'info',
                       'WARN': 'warn', 'ERROR': 'error', 'OFF': 'info'}
        if new_level not in known_level:
            raise common.ArgumentError("Unknown log level %s (use one of %s)" %
                                       (new_level, " ".join(known_level)))
new_log_level = known_level[new_level]
# TODO class_name can be validated against help-loggers
if class_name:
self.__classes_log_level[class_name] = new_log_level
else:
self.__global_log_level = new_log_level
return self
def set_workload(self, workload):
raise NotImplementedError('ScyllaNode.set_workload')
def cpuset(self, id, count, cluster_id):
# leaving one core for other executables to run
allocated_cpus = psutil.cpu_count() - 1
start_id = (id * count + cluster_id) % allocated_cpus
cpuset = []
for cpuid in xrange(start_id, start_id + count):
cpuset.append(str(cpuid % allocated_cpus))
return cpuset
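    # Worked example (assumption: psutil.cpu_count() == 8, so allocated_cpus == 7):
    # for id=2, count=2, cluster_id=0 the start is (2 * 2 + 0) % 7 == 4 and the
    # method returns ['4', '5']; ids wrap modulo 7, so the highest-numbered core is
    # never assigned and stays free for other executables.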
def _wait_for_jmx(self):
if self._process_jmx:
self._process_jmx.wait()
def _wait_for_scylla(self):
if self._process_scylla:
self._process_scylla.wait()
def _wait_for_agent(self):
if self._process_agent:
self._process_agent.wait()
def _start_jmx(self, data):
jmx_jar_dir = os.path.join(self.get_path(), 'bin')
jmx_java_bin = os.path.join(jmx_jar_dir, 'symlinks', 'scylla-jmx')
jmx_jar = os.path.join(jmx_jar_dir, 'scylla-jmx-1.0.jar')
args = [jmx_java_bin,
'-Dapiaddress=%s' % data['listen_address'],
'-Djavax.management.builder.initial=com.scylladb.jmx.utils.APIBuilder',
'-Dcom.sun.management.jmxremote',
'-Dcom.sun.management.jmxremote.port=%s' % self.jmx_port,
'-Dcom.sun.management.jmxremote.rmi.port=%s' % self.jmx_port,
'-Dcom.sun.management.jmxremote.local.only=false',
'-Xmx256m',
'-XX:+UseSerialGC',
'-Dcom.sun.management.jmxremote.authenticate=false',
'-Dcom.sun.management.jmxremote.ssl=false',
'-jar',
jmx_jar]
log_file = os.path.join(self.get_path(), 'logs', 'system.log.jmx')
jmx_log = open(log_file, 'a')
        env_copy = os.environ.copy()  # copy so SCYLLA_HOME does not leak into the parent process
env_copy['SCYLLA_HOME'] = self.get_path()
self._process_jmx = subprocess.Popen(args, stdout=jmx_log,
stderr=jmx_log,
close_fds=True,
env=env_copy)
self._process_jmx.poll()
        # When running on ccm standalone, the waiter thread would block
        # the create commands. Besides, in that mode waiting is unnecessary,
        # since the original Popen reference is garbage collected.
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None)
if standalone is None:
self._process_jmx_waiter = threading.Thread(target=self._wait_for_jmx)
self._process_jmx_waiter.start()
pid_filename = os.path.join(self.get_path(), 'scylla-jmx.pid')
with open(pid_filename, 'w') as pid_file:
pid_file.write(str(self._process_jmx.pid))
def _start_scylla(self, args, marks, update_pid, wait_other_notice,
wait_for_binary_proto):
log_file = os.path.join(self.get_path(), 'logs', 'system.log')
# In case we are restarting a node
# we risk reading the old cassandra.pid file
self._delete_old_pid()
scylla_log = open(log_file, 'a')
try:
env_copy = self._launch_env
except AttributeError:
            env_copy = os.environ.copy()  # copy so SCYLLA_HOME does not leak into the parent process
env_copy['SCYLLA_HOME'] = self.get_path()
self._process_scylla = subprocess.Popen(args, stdout=scylla_log,
stderr=scylla_log,
close_fds=True,
env=env_copy)
self._process_scylla.poll()
        # When running on ccm standalone, the waiter thread would block
        # the create commands. Besides, in that mode waiting is unnecessary,
        # since the original Popen reference is garbage collected.
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None)
if standalone is None:
self._process_scylla_waiter = threading.Thread(target=self._wait_for_scylla)
self._process_scylla_waiter.start()
pid_filename = os.path.join(self.get_path(), 'cassandra.pid')
with open(pid_filename, 'w') as pid_file:
pid_file.write(str(self._process_scylla.pid))
if update_pid:
self._update_pid(self._process_scylla)
if not self.is_running():
raise NodeError("Error starting node %s" % self.name,
self._process_scylla)
if wait_other_notice:
for node, mark in marks:
node.watch_log_for_alive(self, from_mark=mark)
if wait_for_binary_proto:
self.wait_for_binary_interface(from_mark=self.mark, process=self._process_scylla)
else:
time.sleep(2)
return self._process_scylla
def _create_agent_config(self):
conf_file = os.path.join(self.get_conf_dir(), 'scylla-manager-agent.yaml')
ssl_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'scylla_test_ssl')
data = dict()
data['scylla_config_file'] = os.path.join(self.get_conf_dir(), common.SCYLLA_CONF)
data['https'] = "{}:10001".format(self.address())
data['auth_token'] = self.scylla_manager.auth_token
data['tls_cert_file'] = os.path.join(ssl_dir, 'scylla-manager-agent.crt')
data['tls_key_file'] = os.path.join(ssl_dir, 'scylla-manager-agent.key')
data['logger'] = dict(level='debug')
data['scylla'] = {'api_address': "{}".format(self.address()),
'api_port': 10000}
data['prometheus'] = "{}:56090".format(self.address())
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
return conf_file
def _start_scylla_manager_agent(self):
agent_bin = os.path.join(self.scylla_manager._get_path(), 'bin', 'scylla-manager-agent')
log_file = os.path.join(self.get_path(), 'logs', 'system.log.manager_agent')
config_file = self._create_agent_config()
agent_log = open(log_file, 'a')
args = [agent_bin,
'--config-file', config_file]
self._process_agent = subprocess.Popen(args, stdout=agent_log,
stderr=agent_log,
close_fds=True)
self._process_agent.poll()
# When running on ccm standalone, the waiter thread would block
# the create commands. Besides in that mode, waiting is unnecessary,
# since the original popen reference is garbage collected.
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None)
if standalone is None:
self._process_agent_waiter = threading.Thread(target=self._wait_for_agent)
self._process_agent_waiter.start()
pid_filename = os.path.join(self.get_path(), 'scylla-agent.pid')
with open(pid_filename, 'w') as pid_file:
pid_file.write(str(self._process_agent.pid))
api_interface = common.parse_interface(self.address(), 10001)
if not common.check_socket_listening(api_interface, timeout=180):
raise Exception(
"scylla manager agent interface %s:%s is not listening after 180 seconds, scylla manager agent may have failed to start."
% (api_interface[0], api_interface[1]))
def _wait_java_up(self, data):
java_up = False
iteration = 0
while not java_up and iteration < 30:
iteration += 1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.settimeout(1.0)
s.connect((data['listen_address'], int(self.jmx_port)))
java_up = True
except:
java_up = False
try:
s.close()
except:
pass
time.sleep(1)
return java_up
def pause(self):
self._process_scylla.send_signal(signal.SIGSTOP)
def resume(self):
self._process_scylla.send_signal(signal.SIGCONT)
# Scylla Overload start
def start(self, join_ring=True, no_wait=False, verbose=False,
update_pid=True, wait_other_notice=False, replace_token=None,
replace_address=None, jvm_args=None, wait_for_binary_proto=False,
profile_options=None, use_jna=False, quiet_start=False):
"""
Start the node. Options include:
- join_ring: if false, start the node with -Dcassandra.join_ring=False
- no_wait: by default, this method returns when the node is started
and listening to clients.
If no_wait=True, the method returns sooner.
- wait_other_notice: if True, this method returns only when all other
live nodes of the cluster
have marked this node UP.
- replace_token: start the node with the -Dcassandra.replace_token
option.
- replace_address: start the node with the
-Dcassandra.replace_address option.
"""
if jvm_args is None:
jvm_args = []
scylla_cassandra_mapping = {'-Dcassandra.replace_address_first_boot':
'--replace-address-first-boot'}
# Replace args in the form
# ['-Dcassandra.foo=bar'] to ['-Dcassandra.foo', 'bar']
translated_args = []
new_jvm_args = []
for jvm_arg in jvm_args:
if '=' in jvm_arg:
split_option = jvm_arg.split("=")
e_msg = ("Option %s not in the form '-Dcassandra.foo=bar'. "
"Please check your test" % jvm_arg)
assert len(split_option) == 2, e_msg
option, value = split_option
# If we have information on how to translate the jvm option,
# translate it
if option in scylla_cassandra_mapping:
translated_args += [scylla_cassandra_mapping[option],
value]
# Otherwise, just pass it as is
else:
new_jvm_args.append(jvm_arg)
else:
new_jvm_args.append(jvm_arg)
jvm_args = new_jvm_args
if self.is_running():
raise NodeError("%s is already running" % self.name)
for itf in list(self.network_interfaces.values()):
if itf is not None and replace_address is None:
try:
common.check_socket_available(itf)
except Exception as msg:
print("{}. Looking for offending processes...".format(msg))
for proc in psutil.process_iter():
if any(self.cluster.ipprefix in cmd for cmd in proc.cmdline()):
print("name={} pid={} cmdline={}".format(proc.name(), proc.pid, proc.cmdline()))
raise msg
marks = []
if wait_other_notice:
marks = [(node, node.mark_log()) for node in
list(self.cluster.nodes.values()) if node.is_live()]
self.mark = self.mark_log()
launch_bin = common.join_bin(self.get_path(), 'bin', 'scylla')
options_file = os.path.join(self.get_path(), 'conf', 'scylla.yaml')
# TODO: we do not support forcing specific settings
# TODO: workaround for api-address as we do not load it
# from config file scylla#59
conf_file = os.path.join(self.get_conf_dir(), common.SCYLLA_CONF)
with open(conf_file, 'r') as f:
data = yaml.safe_load(f)
jvm_args = jvm_args + ['--api-address', data['api_address']]
jvm_args = jvm_args + ['--collectd-hostname',
'%s.%s' % (socket.gethostname(), self.name)]
# Let's add jvm_args and the translated args
args = [launch_bin, '--options-file', options_file, '--log-to-stdout', '1'] + jvm_args + translated_args
# Let's search for default overrides in SCYLLA_EXT_OPTS
scylla_ext_opts = os.getenv('SCYLLA_EXT_OPTS', "").split()
opts_i = 0
orig_args = list(args)
while opts_i < len(scylla_ext_opts):
if scylla_ext_opts[opts_i].startswith("--scylla-manager="):
opts_i += 1
elif scylla_ext_opts[opts_i].startswith('-'):
add = False
if scylla_ext_opts[opts_i] not in orig_args:
add = True
args.append(scylla_ext_opts[opts_i])
opts_i += 1
while opts_i < len(scylla_ext_opts) and not scylla_ext_opts[opts_i].startswith('-'):
if add:
args.append(scylla_ext_opts[opts_i])
opts_i += 1
if '--developer-mode' not in args:
args += ['--developer-mode', 'true']
if '--smp' not in args:
# If --smp is not passed from the cmdline, use the node's default (self._smp)
args += ['--smp', str(self._smp)]
elif self._smp_set_during_test:
# If node.set_smp() is called during the test, ignore the --smp
# passed from the cmdline.
args[args.index('--smp') + 1] = str(self._smp)
else:
# Update self._smp based on command line parameter.
# It may be used below, along with self._mem_mb_per_cpu, for calculating --memory
self._smp = int(args[args.index('--smp') + 1])
if '--memory' not in args:
# If --memory is not passed from cmdline, use default (512M per cpu)
args += ['--memory', '{}M'.format(self._mem_mb_per_cpu * self._smp)]
elif self._mem_set_during_test:
# If node.set_mem_mb_per_cpu() is called during the test, ignore the --memory
# passed from the cmdline.
args[args.index('--memory') + 1] = '{}M'.format(self._mem_mb_per_cpu * self._smp)
if '--default-log-level' not in args:
args += ['--default-log-level', self.__global_log_level]
# TODO add support for classes_log_level
if '--collectd' not in args:
args += ['--collectd', '0']
if '--cpuset' not in args:
args += ['--overprovisioned']
if '--prometheus-address' not in args:
args += ['--prometheus-address', data['api_address']]
if replace_address:
args += ['--replace-address', replace_address]
args += ['--unsafe-bypass-fsync', '1']
scylla_process = self._start_scylla(args, marks, update_pid,
wait_other_notice,
wait_for_binary_proto)
self._start_jmx(data)
if not self._wait_java_up(data):
e_msg = ("Error starting node %s: unable to connect to scylla-jmx" %
self.name)
raise NodeError(e_msg, scylla_process)
self.is_running()
if self.scylla_manager and self.scylla_manager.is_agent_available:
self._start_scylla_manager_agent()
return scylla_process
def start_dse(self,
join_ring=True,
no_wait=False,
verbose=False,
update_pid=True,
wait_other_notice=False,
replace_token=None,
replace_address=None,
jvm_args=None,
wait_for_binary_proto=False,
profile_options=None,
use_jna=False):
"""
Start the node. Options include:
- join_ring: if false, start the node with -Dcassandra.join_ring=False
- no_wait: by default, this method returns when the node is started
and listening to clients.
If no_wait=True, the method returns sooner.
- wait_other_notice: if True, this method returns only when all other
live nodes of the cluster have marked this node UP.
- replace_token: start the node with the -Dcassandra.replace_token
option.
- replace_address: start the node with the
-Dcassandra.replace_address option.
"""
if jvm_args is None:
jvm_args = []
raise NotImplementedError('ScyllaNode.start_dse')
def _update_jmx_pid(self, wait=True):
pidfile = os.path.join(self.get_path(), 'scylla-jmx.pid')
start = time.time()
while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0):
if time.time() - start > 30.0 or not wait:
print_("Timed out waiting for pidfile to be filled "
"(current time is %s)" % (datetime.datetime.now()))
return
else:
time.sleep(0.1)
try:
with open(pidfile, 'r') as f:
self.jmx_pid = int(f.readline().strip())
except IOError as e:
raise NodeError('Problem starting node %s scylla-jmx due to %s' %
(self.name, e))
def _update_scylla_agent_pid(self):
pidfile = os.path.join(self.get_path(), 'scylla-agent.pid')
start = time.time()
while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0):
if time.time() - start > 30.0:
print_("Timed out waiting for pidfile to be filled "
"(current time is %s)" % (datetime.datetime.now()))
break
else:
time.sleep(0.1)
try:
with open(pidfile, 'r') as f:
self.agent_pid = int(f.readline().strip())
except IOError as e:
raise NodeError('Problem starting node %s scylla-agent due to %s' %
(self.name, e))
def wait_until_stopped(self, wait_seconds=127):
start_time = time.time()
wait_time_sec = 1
while True:
if not self.is_running():
return True
elapsed = time.time() - start_time
if elapsed >= wait_seconds:
return False
time.sleep(wait_time_sec)
if elapsed + wait_time_sec > wait_seconds:
wait_time_sec = wait_seconds - elapsed
elif wait_time_sec <= 16:
wait_time_sec *= 2
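# With the default wait_seconds=127 the sleep interval doubles 1, 2, 4, 8, 16
# and then stays at 32 (1+2+4+8+16+32+32+32 = 127); near the deadline the
# interval is shortened so the running check happens close to wait_seconds.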
def stop(self, wait=True, wait_other_notice=False, other_nodes=None, gently=True, wait_seconds=127):
"""
Stop the node.
- wait: if True (the default), wait for the Scylla process to be
really dead. Otherwise return after having sent the kill signal.
stop() will wait up to wait_seconds, by default 127 seconds, for
the Scylla process to die. After this wait, it will throw an
exception stating it couldn't stop the node.
- wait_other_notice: return only when the other live nodes of the
cluster have marked this node as dead.
- gently: Let Scylla and Scylla JMX clean up and shut down properly.
Otherwise do a 'kill -9' which shuts down faster.
"""
marks = []
if self.is_running():
if wait_other_notice:
if not other_nodes:
other_nodes = list(self.cluster.nodes.values())
marks = [(node, node.mark_log()) for node in
other_nodes if
node.is_live() and node is not self]
self._update_jmx_pid(wait=False)
if self.scylla_manager and self.scylla_manager.is_agent_available:
self._update_scylla_agent_pid()
for proc in [self._process_jmx, self._process_scylla, self._process_agent]:
if proc:
if gently:
try:
proc.terminate()
except OSError:
pass
else:
try:
proc.kill()
except OSError:
pass
else:
signal_mapping = {True: signal.SIGTERM, False: signal.SIGKILL}
for pid in [self.jmx_pid, self.pid, self.agent_pid]:
if pid:
try:
os.kill(pid, signal_mapping[gently])
except OSError:
pass
if not wait and not wait_other_notice:
return True
if not self.wait_until_stopped(wait_seconds):
if self.jmx_pid:
try:
os.kill(self.jmx_pid, signal.SIGKILL)
except OSError:
pass
if gently and self.pid:
# Aborting is intended to generate a core dump
# so the reason the node didn't stop normally can be studied.
print("{} is still running. Trying to generate coredump using kill({}, SIGQUIT)...".format(self.name, self.pid))
try:
os.kill(self.pid, signal.SIGQUIT)
except OSError:
pass
self.wait_until_stopped(300)
if self.is_running():
raise NodeError("Problem stopping node %s" % self.name)
if wait_other_notice:
for node, mark in marks:
node.watch_log_for_death(self, from_mark=mark)
else:
return False
def import_config_files(self):
# TODO: override node - enable logging
self._update_config()
self.copy_config_files()
self.__update_yaml()
self.__copy_logback_files()
def get_tool_java_dir(self):
if 'scylla-repository' in self.get_install_dir():
return os.path.join(self.get_install_dir(), 'scylla-java-tools')
else:
return os.environ.get('TOOLS_JAVA_DIR', os.path.join(self.get_install_dir(), 'resources', 'cassandra'))
def get_jmx_dir(self, relative_repos_root):
return os.environ.get('SCYLLA_JMX_DIR', os.path.join(self.get_install_dir(), relative_repos_root, 'scylla-jmx'))
def __copy_logback_files(self):
shutil.copy(os.path.join(self.get_tool_java_dir(), 'conf', 'logback-tools.xml'),
os.path.join(self.get_conf_dir(), 'logback-tools.xml'))
def import_dse_config_files(self):
raise NotImplementedError('ScyllaNode.import_dse_config_files')
def copy_config_files_dse(self):
raise NotImplementedError('ScyllaNode.copy_config_files_dse')
def hard_link_or_copy(self, src, dst, extra_perms=0, always_copy=False):
def do_copy(src, dst, extra_perms=0):
shutil.copy(src, dst)
os.chmod(dst, os.stat(src).st_mode | extra_perms)
if always_copy:
return do_copy(src, dst, extra_perms)
try:
os.link(src, dst)
except OSError as oserror:
if oserror.errno == errno.EXDEV or oserror.errno == errno.EMLINK:
do_copy(src, dst, extra_perms)
else:
raise RuntimeError("Unable to create hard link from %s to %s: %s" % (src, dst, oserror))
def import_bin_files(self):
# selectively copying files to reduce risk of using unintended items
files = ['cassandra.in.sh', 'nodetool']
os.makedirs(os.path.join(self.get_path(), 'resources', 'cassandra', 'bin'))
for name in files:
self.hard_link_or_copy(os.path.join(self.get_tool_java_dir(),
'bin', name),
os.path.join(self.get_path(),
'resources', 'cassandra',
'bin', name))
# selectively copying files to reduce risk of using unintended items
files = ['sstabledump', 'sstablelevelreset', 'sstablemetadata',
'sstablerepairedset', 'sstablesplit']
os.makedirs(os.path.join(self.get_path(), 'resources', 'cassandra',
'tools', 'bin'))
for name in files:
self.hard_link_or_copy(os.path.join(self.get_tool_java_dir(),
'tools', 'bin', name),
os.path.join(self.get_path(),
'resources', 'cassandra',
'tools', 'bin', name))
# TODO: - currently no scripts only executable - copying exec
scylla_mode = self.cluster.get_scylla_mode()
if scylla_mode == 'reloc':
relative_repos_root = '../..'
self.hard_link_or_copy(os.path.join(self.get_install_dir(), 'bin', 'scylla'),
os.path.join(self.get_bin_dir(), 'scylla'),
stat.S_IEXEC)
os.environ['GNUTLS_SYSTEM_PRIORITY_FILE'] = os.path.join(self.get_install_dir(), 'scylla-core-package/libreloc/gnutls.config')
else:
relative_repos_root = '..'
src = os.path.join(self.get_install_dir(), 'build', scylla_mode, 'scylla')
dst = os.path.join(self.get_bin_dir(), 'scylla')
dbuild_so_dir = os.environ.get('SCYLLA_DBUILD_SO_DIR')
if not dbuild_so_dir:
self.hard_link_or_copy(src, dst, stat.S_IEXEC)
else:
self.hard_link_or_copy(src, dst, stat.S_IEXEC, always_copy=True)
search_pattern = os.path.join(dbuild_so_dir, 'ld-linux-x86-64.so.*')
res = glob.glob(search_pattern)
if not res:
raise RuntimeError('{} not found'.format(search_pattern))
if len(res) > 1:
raise RuntimeError('{}: found too many matches: {}'.format(search_pattern, res))
loader = res[0]
self._launch_env = dict(os.environ)
self._launch_env['LD_LIBRARY_PATH'] = dbuild_so_dir
patchelf_cmd = [loader, os.path.join(dbuild_so_dir, 'patchelf'), '--set-interpreter', loader, dst]
def run_patchelf(patchelf_cmd):
p = subprocess.Popen(patchelf_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self._launch_env)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout, stderr)
(returncode, stdout, stderr) = run_patchelf(patchelf_cmd)
if returncode != 0:
# Retry after stripping binary if hit
# https://github.com/scylladb/scylla/issues/5245
if stderr == 'read\n':
cmd = ['strip', dst]
subprocess.check_call(cmd)
(returncode, stdout, stderr) = run_patchelf(patchelf_cmd)
if returncode != 0:
raise RuntimeError('{} exited with status {}.\nstdout:{}\nstderr:\n{}'.format(patchelf_cmd, returncode, stdout, stderr))
if 'scylla-repository' in self.get_install_dir():
self.hard_link_or_copy(os.path.join(self.get_install_dir(), 'scylla-jmx', 'scylla-jmx-1.0.jar'),
os.path.join(self.get_bin_dir(), 'scylla-jmx-1.0.jar'))
self.hard_link_or_copy(os.path.join(self.get_install_dir(), 'scylla-jmx', 'scylla-jmx'),
os.path.join(self.get_bin_dir(), 'scylla-jmx'))
else:
self.hard_link_or_copy(os.path.join(self.get_jmx_dir(relative_repos_root), 'target', 'scylla-jmx-1.0.jar'),
os.path.join(self.get_bin_dir(), 'scylla-jmx-1.0.jar'))
self.hard_link_or_copy(os.path.join(self.get_jmx_dir(relative_repos_root), 'scripts', 'scylla-jmx'),
os.path.join(self.get_bin_dir(), 'scylla-jmx'))
os.makedirs(os.path.join(self.get_bin_dir(), 'symlinks'))
os.symlink('/usr/bin/java', os.path.join(self.get_bin_dir(),
'symlinks',
'scylla-jmx'))
parent_dir = os.path.dirname(os.path.realpath(__file__))
resources_bin_dir = os.path.join(parent_dir, 'resources', 'bin')
for name in os.listdir(resources_bin_dir):
filename = os.path.join(resources_bin_dir, name)
if os.path.isfile(filename):
shutil.copy(filename, self.get_bin_dir())
common.add_exec_permission(self.get_bin_dir(), name)
def _save(self):
# TODO: - overwrite node
self.__update_yaml()
self._update_config()
def __update_yaml(self):
# TODO: copied from node.py
conf_file = os.path.join(self.get_conf_dir(), common.SCYLLA_CONF)
with open(conf_file, 'r') as f:
data = yaml.safe_load(f)
data['cluster_name'] = self.cluster.name
data['auto_bootstrap'] = self.auto_bootstrap
data['initial_token'] = self.initial_token
if (not self.cluster.use_vnodes and
self.get_base_cassandra_version() >= 1.2):
data['num_tokens'] = 1
if 'seeds' in data:
# cassandra 0.7
data['seeds'] = self.cluster.get_seeds()
else:
# cassandra 0.8
data['seed_provider'][0]['parameters'][0]['seeds'] = (
','.join(self.cluster.get_seeds()))
data['listen_address'], data['storage_port'] = (
self.network_interfaces['storage'])
data['rpc_address'], data['rpc_port'] = (
self.network_interfaces['thrift'])
if (self.network_interfaces['binary'] is not None and
self.get_base_cassandra_version() >= 1.2):
_, data['native_transport_port'] = self.network_interfaces['binary']
data['data_file_directories'] = [os.path.join(self.get_path(), 'data')]
data['commitlog_directory'] = os.path.join(self.get_path(),
'commitlogs')
data['hints_directory'] = os.path.join(self.get_path(), 'hints')
data['saved_caches_directory'] = os.path.join(self.get_path(),
'saved_caches')
data['view_hints_directory'] = os.path.join(self.get_path(), 'view_hints')
if self.cluster.partitioner:
data['partitioner'] = self.cluster.partitioner
# TODO: add scylla options
data['api_address'] = data['listen_address']
# last win and we want node options to win
full_options = dict(list(self.cluster._config_options.items()) +
list(self.get_config_options().items()))
for name in full_options:
value = full_options[name]
if value is None:
try:
del data[name]
except KeyError:
# it is fine to remove a key that is not there
pass
else:
try:
if isinstance(data[name], dict):
for option in full_options[name]:
data[name][option] = full_options[name][option]
else:
data[name] = full_options[name]
except KeyError:
data[name] = full_options[name]
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
# TODO: - for now create a cassandra conf file leaving only
# cassandra config items - this should be removed once tools are
# updated to remove scylla conf and use a shrunk version
cassandra_conf_file = os.path.join(self.get_conf_dir(),
common.CASSANDRA_CONF)
cassandra_conf_items = {'authenticator': 0,
'authorizer': 0,
'auto_snapshot': 0,
'batch_size_warn_threshold_in_kb': 0,
'batchlog_replay_throttle_in_kb': 0,
'broadcast_address': 0,
'broadcast_rpc_address': 0,
'cas_contention_timeout_in_ms': 0,
'client_encryption_options': 0,
'cluster_name': 0,
'column_index_size_in_kb': 0,
'commit_failure_policy': 0,
'commitlog_directory': 0,
'commitlog_segment_size_in_mb': 0,
'commitlog_sync': 0,
'commitlog_sync_batch_window_in_ms': 0,
'commitlog_sync_period_in_ms': 0,
'commitlog_total_space_in_mb': 0,
'compaction_large_partition_warning_threshold_mb': 0,
'compaction_throughput_mb_per_sec': 0,
'concurrent_compactors': 0,
'concurrent_counter_writes': 0,
'concurrent_reads': 0,
'concurrent_writes': 0,
'counter_cache_keys_to_save': 0,
'counter_cache_save_period': 0,
'counter_cache_size_in_mb': 0,
'counter_write_request_timeout_in_ms': 0,
'cross_node_timeout': 0,
'data_file_directories': 0,
'disk_failure_policy': 0,
'dynamic_snitch_badness_threshold': 0,
'dynamic_snitch_reset_interval_in_ms': 0,
'dynamic_snitch_update_interval_in_ms': 0,
'endpoint_snitch': 0,
'file_cache_size_in_mb': 0,
'hinted_handoff_enabled': 0,
'hinted_handoff_throttle_in_kb': 0,
'incremental_backups': 0,
'index_summary_capacity_in_mb': 0,
'index_summary_resize_interval_in_minutes': 0,
'inter_dc_stream_throughput_outbound_megabits_per_sec': 0,
'inter_dc_tcp_nodelay': 0,
'internode_authenticator': 0,
'internode_compression': 0,
'key_cache_keys_to_save': 0,
'key_cache_save_period': 0,
'key_cache_size_in_mb': 0,
'listen_address': 0,
'listen_interface': 0,
'listen_interface_prefer_ipv6': 0,
'max_hint_window_in_ms': 0,
'max_hints_delivery_threads': 0,
'memory_allocator': 0,
'memtable_allocation_type': 0,
'memtable_cleanup_threshold': 0,
'memtable_flush_writers': 0,
'memtable_heap_space_in_mb': 0,
'memtable_offheap_space_in_mb': 0,
'native_transport_max_concurrent_connections': 0,
'native_transport_max_concurrent_connections_per_ip': 0,
'native_transport_max_frame_size_in_mb': 0,
'native_transport_max_threads': 0,
'native_transport_port': 0,
'num_tokens': 0,
'partitioner': 0,
'permissions_validity_in_ms': 0,
'phi_convict_threshold': 0,
'range_request_timeout_in_ms': 0,
'read_request_timeout_in_ms': 0,
'request_scheduler': 0,
'request_scheduler_id': 0,
'request_scheduler_options': 0,
'request_timeout_in_ms': 0,
'row_cache_keys_to_save': 0,
'row_cache_save_period': 0,
'row_cache_size_in_mb': 0,
'rpc_address': 0,
'rpc_interface': 0,
'rpc_interface_prefer_ipv6': 0,
'rpc_keepalive': 0,
'rpc_max_threads': 0,
'rpc_min_threads': 0,
'rpc_port': 0,
'rpc_recv_buff_size_in_bytes': 0,
'rpc_send_buff_size_in_bytes': 0,
'rpc_server_type': 0,
'seed_provider': 0,
'server_encryption_options': 0,
'snapshot_before_compaction': 0,
'ssl_storage_port': 0,
'sstable_preemptive_open_interval_in_mb': 0,
'start_native_transport': 0,
'start_rpc': 0,
'storage_port': 0,
'stream_throughput_outbound_megabits_per_sec': 0,
'streaming_socket_timeout_in_ms': 0,
'thrift_framed_transport_size_in_mb': 0,
'tombstone_failure_threshold': 0,
'tombstone_warn_threshold': 0,
'trickle_fsync': 0,
'trickle_fsync_interval_in_kb': 0,
'truncate_request_timeout_in_ms': 0,
'write_request_timeout_in_ms': 0}
cassandra_data = {}
for key in data:
if key in cassandra_conf_items:
cassandra_data[key] = data[key]
with open(cassandra_conf_file, 'w') as f:
yaml.safe_dump(cassandra_data, f, default_flow_style=False)
def __update_yaml_dse(self):
raise NotImplementedError('ScyllaNode.__update_yaml_dse')
def _update_log4j(self):
raise NotImplementedError('ScyllaNode._update_log4j')
def __generate_server_xml(self):
raise NotImplementedError('ScyllaNode.__generate_server_xml')
def _get_directories(self):
dirs = {}
for i in ['data', 'commitlogs', 'bin', 'conf', 'logs', 'hints', 'view_hints']:
dirs[i] = os.path.join(self.get_path(), i)
return dirs
def _copy_agent(self):
raise NotImplementedError('ScyllaNode._copy_agent')
def _start_agent(self):
raise NotImplementedError('ScyllaNode._start_agent')
def _stop_agent(self):
raise NotImplementedError('ScyllaNode._stop_agent')
def _write_agent_address_yaml(self, agent_dir):
raise NotImplementedError('ScyllaNode._write_agent_address_yaml')
def _write_agent_log4j_properties(self, agent_dir):
raise NotImplementedError('ScyllaNode._write_agent_log4j_properties')
def _wait_no_pending_flushes(self, wait_timeout=60):
def no_pending_flushes():
stdout, _ = self.nodetool('cfstats')
pending_flushes = False
for line in stdout.splitlines():
line = line.strip()
if line.startswith('Pending flushes'):
_, pending_flushes_str = line.split(':')
pending_flushes_count = int(pending_flushes_str.strip())
if pending_flushes_count > 0:
pending_flushes = True
return not pending_flushes
result = wait_for(no_pending_flushes, timeout=wait_timeout, step=1.0)
if result is None:
raise NodeError("Node %s still has pending flushes after "
"%s seconds" % (self.name, wait_timeout))
def flush(self):
self.nodetool("flush")
self._wait_no_pending_flushes()
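# The jvm_args rewriting done in ScyllaNode.start() above is easiest to see in
# isolation. The following standalone sketch (the function name is ours, it is
# not part of the original class) mirrors that logic: '-Dcassandra.foo=bar'
# options with a known Scylla equivalent become ['--scylla-flag', 'bar'], and
# everything else is passed through unchanged.
def _translate_jvm_args_sketch(jvm_args, mapping):
    translated, passthrough = [], []
    for arg in jvm_args:
        if '=' not in arg:
            passthrough.append(arg)
            continue
        option, value = arg.split('=', 1)
        if option in mapping:
            translated += [mapping[option], value]
        else:
            passthrough.append(arg)
    return translated, passthrough
# Example:
# _translate_jvm_args_sketch(
#     ['-Dcassandra.replace_address_first_boot=127.0.0.2'],
#     {'-Dcassandra.replace_address_first_boot': '--replace-address-first-boot'})
# returns (['--replace-address-first-boot', '127.0.0.2'], [])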
|
debug_data_multiplexer.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around DebugDataReader used for retrieving tfdbg v2 data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
# Dummy run name for the debugger.
# Currently, the `DebuggerV2ExperimentMultiplexer` class is tied to a single
# logdir, which holds at most one DebugEvent file set in the tfdbg v2 (tfdbg2
# for short) format.
# TODO(cais): When tfdbg2 allows there to be multiple DebugEvent file sets in
# the same logdir, replace this magic string with actual run names.
DEFAULT_DEBUGGER_RUN_NAME = "__default_debugger_run__"
def _execution_digest_to_json(execution_digest):
# TODO(cais): Use the .to_json() method when available.
return {
"wall_time": execution_digest.wall_time,
"op_type": execution_digest.op_type,
"output_tensor_device_ids": list(
execution_digest.output_tensor_device_ids
),
}
def run_in_background(target):
"""Run a target task in the background.
In the context of this module, `target` is the `update()` method of the
underlying reader for tfdbg2-format data.
This method is mocked by unit tests for deterministic behaviors during
testing.
Args:
target: The target task to run in the background, a callable with no args.
"""
# TODO(cais): Implement repetition with sleeping periods in between.
# TODO(cais): Add more unit tests in debug_data_multiplexer_test.py when
# the behavior gets more complex.
thread = threading.Thread(target=target)
thread.start()
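# The TODO above asks for repetition with sleep periods in between. A minimal
# sketch of what such a variant could look like; this helper and its default
# interval are assumptions, not part of the current module API:
def run_repeatedly_in_background(target, interval_sec=2.0):
    """Invoke `target` forever on a daemon thread, sleeping between calls."""
    import time  # local import keeps this sketch self-contained
    def _loop():
        while True:
            target()
            time.sleep(interval_sec)
    thread = threading.Thread(target=_loop)
    thread.daemon = True  # do not keep the process alive at interpreter exit
    thread.start()
    return thread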
class DebuggerV2EventMultiplexer(object):
"""A class used for accessing tfdbg v2 DebugEvent data on local filesystem.
This class is a short-term hack, mirroring the EventMultiplexer for the main
TensorBoard plugins (e.g., scalar, histogram and graphs). As such, it only
implements the methods relevant to the Debugger V2 plugin.
TODO(cais): Integrate it with EventMultiplexer and use the integrated class
from MultiplexerDataProvider for a single path of accessing debugger and
non-debugger data.
"""
def __init__(self, logdir):
"""Constructor for the `DebugEventMultiplexer`.
Args:
logdir: Path to the directory to load the tfdbg v2 data from.
"""
self._logdir = logdir
self._reader = None
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first DebugEvent of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
This currently must be hardcoded as `DEFAULT_DEBUGGER_RUN_NAME`,
as each logdir contains at most one DebugEvent file set (i.e., a
run of a tfdbg2-instrumented TensorFlow program.)
Returns:
The wall_time of the first event of the run, which will be in seconds
since the epoch as a `float`.
"""
if self._reader is None:
raise ValueError("No tfdbg2 runs exists.")
if run != DEFAULT_DEBUGGER_RUN_NAME:
raise ValueError(
"Expected run name to be %s, but got %s"
% (DEFAULT_DEBUGGER_RUN_NAME, run)
)
return self._reader.starting_wall_time()
def PluginRunToTagToContent(self, plugin_name):
raise NotImplementedError(
"DebugDataMultiplexer.PluginRunToTagToContent() has not been "
"implemented yet."
)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
The `Runs()` method of this class is specialized for the tfdbg2-format
DebugEvent files. It only returns runs that contain tfdbg2 debugger data.
Returns:
If tfdbg2-format data exists in the `logdir` of this object, returns:
```
{runName: { "debugger-v2": [tag1, tag2, tag3] } }
```
where `runName` is the hard-coded string `DEFAULT_DEBUGGER_RUN_NAME`
This is related to the fact that tfdbg2 currently contains
at most one DebugEvent file set per directory.
If no tfdbg2-format data exists in the `logdir`, an empty `dict`.
"""
if self._reader is None:
from tensorflow.python.debug.lib import debug_events_reader
try:
self._reader = debug_events_reader.DebugDataReader(self._logdir)
# NOTE(cais): Currently each logdir is enforced to have only one
# DebugEvent file set. So we add hard-coded default run name.
run_in_background(self._reader.update)
# TODO(cais): Start off a reading thread here, instead of being
# called only once here.
except AttributeError as error:
# Gracefully fail for users without the required API changes to
# debug_events_reader.DebugDataReader introduced in
# TF 2.1.0.dev20200103. This should be safe to remove when
# TF 2.2 is released.
return {}
except ValueError as error:
# When no DebugEvent file set is found in the logdir, a
# `ValueError` is thrown.
return {}
return {
DEFAULT_DEBUGGER_RUN_NAME: {
# TODO(cais): Add the semantically meaningful tag names such as
# 'execution_digests_book', 'alerts_book'
"debugger-v2": []
}
}
def ExecutionDigests(self, run, begin, end):
"""Get ExecutionDigests.
Args:
run: The tfdbg2 run to get `ExecutionDigest`s from.
begin: Beginning execution index.
end: Ending execution index.
Returns:
A JSON-serializable object containing the `ExecutionDigest`s and
related meta-information
"""
runs = self.Runs()
if run not in runs:
return None
# TODO(cais): For scalability, use begin and end kwargs when available in
# `DebugDataReader.executions()`.
execution_digests = self._reader.executions(digest=True)
if begin < 0:
raise IndexError("Invalid begin index (%d)" % begin)
if end > len(execution_digests):
raise IndexError(
"end index (%d) out of bounds (%d)"
% (end, len(execution_digests))
)
if end >= 0 and end < begin:
raise ValueError(
"end index (%d) is unexpected less than begin index (%d)"
% (end, begin)
)
if end < 0: # This means all digests.
end = len(execution_digests)
return {
"begin": begin,
"end": end,
"num_digests": len(execution_digests),
"execution_digests": [
_execution_digest_to_json(digest)
for digest in execution_digests[begin:end]
],
}
def SourceFileList(self, run):
runs = self.Runs()
if run not in runs:
return None
# TODO(cais): Use public method `self._reader.source_files()` when available.
# pylint: disable=protected-access
return list(self._reader._host_name_file_path_to_offset.keys())
# pylint: enable=protected-access
def SourceLines(self, run, index):
runs = self.Runs()
if run not in runs:
return None
# TODO(cais): Use public method `self._reader.source_files()` when available.
# pylint: disable=protected-access
source_file_list = list(
self._reader._host_name_file_path_to_offset.keys()
)
# pylint: enable=protected-access
try:
host_name, file_path = source_file_list[index]
except IndexError:
raise IndexError("There is no source-code file at index %d" % index)
return {
"host_name": host_name,
"file_path": file_path,
"lines": self._reader.source_lines(host_name, file_path),
}
|
A3C_Transfer.py
|
import threading  # multithreading
import tensorflow as tf
import gym
import os
import shutil
from utils import *
import pickle
import argparse
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
seed_setting = 600
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default='test', type=str)
parser.add_argument('--env', type=str, default="MountainCar-v0")
parser.add_argument('--render', default=False)
parser.add_argument('--test_iteration', default=500, type=int)
parser.add_argument('--max_episode', default=4000, type=int) # num of games
parser.add_argument('--getting_data', default=True, type=bool)
parser.add_argument('--load', default=True, type=bool)
parser.add_argument('--mixed_version', default=True, type=bool)
parser.add_argument('--threshold', default=0.9, type=float)
args = parser.parse_args()
def load_dt(game):
if game == "Acrobot-v1":
f2 = open("decision_model/" + 'dt_Acrobot-v111.txt', 'rb')
elif game == "CartPole-v1-v1":
f2 = open("decision_model/" + 'dt_cartpole5.txt', 'rb')
elif game == "MountainCar-v0":
f2 = open("decision_model/" + 'dt_MountainCar7.txt', 'rb')
return f2
f2 = load_dt(args.env)
s2 = f2.read()
clf2 = pickle.loads(s2)
GAME = args.env
if args.mode == "train":
TRAIN = True
else:
TRAIN = False
TEST_RENDER = args.render
mixed_version = args.mixed_version
OUTPUT_GRAPH = False # tensorboards
LOG_DIR = './log'
if TRAIN:
MAX_GLOBAL_EP = args.max_episode # total episodes
else:
MAX_GLOBAL_EP = args.test_iteration # total episodes
# HyperParams setting
N_WORKERS = 16
THRESHOLD = args.threshold
MAX_ROUND_EP = 10000 # max steps each episode
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.99
ENTROPY_BETA = 0.001
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_RUNNING_STEP = []
A3C_interrupt = []
GLOBAL_EP = 0
start = time.time()
env = gym.make(GAME)
env.seed(seed_setting)
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # GLOBAL net
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
# return actor and critic's params under the scope of GLOBAL
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
# s, a_his, v_target are designed to
# collect interactive data
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
# the output of Actor net
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.v_target = tf.placeholder(
tf.float32, [None, 1], 'Vtarget')
# return params under the local worker scope
self.a_prob, self.v, self.a_params, self.c_params = self._build_net(
scope)
self.td = tf.subtract(self.v_target, self.v, name='TD_error')
# calculate c_loss for minimizing td_error
with tf.name_scope('c_loss'):
# c_loss = reduce_mean(td^2), promise to be 0
self.c_loss = tf.reduce_mean(tf.square(self.td))
# calculate a_loss for maximizing expectation
with tf.name_scope('a_loss'):
# discrete version
self.log_prob = tf.reduce_sum(
tf.log(
self.a_prob +
1e-5) *
tf.one_hot(
self.a_his,
N_A,
dtype=tf.float32),
axis=1,
keepdims=True)
# tf.stop_gradient turns td, originally an op (node) in the TensorFlow graph,
# into a constant, so gradients of the loss do not backpropagate into td.
exp_v = self.log_prob * tf.stop_gradient(self.td)
# To make the output action distribution more balanced we maximize this
# entropy, which means minimizing the negative entropy.
entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),
axis=1, keepdims=True) # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
# calculate local_grad
with tf.name_scope('local_grad'):
# tf.gradients(ys, xs, grad_ys=None, name='gradients',stop_gradients=None,)
# tf.gradients = tf.compute_gradient
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
# l_p: local parameters, g_p: global parameters
# assign the global parameters directly to the local parameters
# (zip pairs them up, just as optimizer.compute_gradients/apply_gradients need)
self.pull_a_params_op = [
l_p.assign(g_p) for l_p, g_p in zip(
self.a_params, globalAC.a_params)]
self.pull_c_params_op = [
l_p.assign(g_p) for l_p, g_p in zip(
self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
# the gradients are already computed above, so we only need to define the
# optimizers and call apply_gradients
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
self.update_a_op = OPT_A.apply_gradients(
zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(
zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
# actor network, 2 layers
with tf.variable_scope('actor'):
l_a = tf.layers.dense(
self.s,
200,
tf.nn.relu6,
kernel_initializer=w_init,
name='la')
a_prob = tf.layers.dense(
l_a,
N_A,
tf.nn.softmax,
kernel_initializer=w_init,
name='ap')
# critic network, 2 layers
with tf.variable_scope('critic'):
l_c = tf.layers.dense(
self.s,
100,
tf.nn.relu6,
kernel_initializer=w_init,
name='lc')
v = tf.layers.dense(
l_c,
1,
kernel_initializer=w_init,
name='v') # state value
# tf.get_collection(key, scope=None) returns the list of variables collected
# under 'key' within the given name scope
a_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope + '/actor')
c_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope + '/critic')
return a_prob, v, a_params, c_params
def choose_action(self, s): # run by a local
prob_weights = SESS.run(self.a_prob, feed_dict={
self.s: s[np.newaxis, :]})
prob_weights /= prob_weights.sum() # normalize
if TRAIN is False:
action = np.argmax(prob_weights)
else:
action = np.random.choice(
range(
prob_weights.shape[1]),
p=prob_weights.ravel())
return action
def update_global(self, feed_dict): # local grads applies to global net
SESS.run([self.update_a_op, self.update_c_op], feed_dict)
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
j = 0
with_A3C = 0
while TRAIN:
if self.name == 'W_0':
if TEST_RENDER:
self.env.render()
# mixed version
s_nexis = s[np.newaxis, :]
prob = clf2.predict_proba(s_nexis)
if np.max(prob) > THRESHOLD:
a = np.squeeze(clf2.predict(s_nexis))
s_, r, done, info = self.env.step(a)
# buffer_s.append(s_)
# buffer_a.append(a)
# buffer_r.append(r)
ep_r += r
else:
a = self.AC.choose_action(s)
with_A3C += 1
s_, r, done, info = self.env.step(a)
ep_r += r
buffer_s.append(s_)
buffer_a.append(a)
buffer_r.append(r)
# update global and assign to local net
if (with_A3C % UPDATE_GLOBAL_ITER ==
0 or done) and (len(buffer_a) > 0):
if done:
v_s_ = 0 # terminal
else:
# [0,0] is for getting a number instead of a matrix
v_s_ = SESS.run(
self.AC.v, {
self.AC.s: s_[
np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(
buffer_s), np.array(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
# print(SESS.run(self.AC.v_target, {self.AC.v_target: buffer_v_target}))
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
j += 1
if done or j > MAX_ROUND_EP:
GLOBAL_RUNNING_STEP.append(j)
GLOBAL_RUNNING_R.append(ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
print_time_information(
start, GLOBAL_EP=GLOBAL_EP, MAX_GLOBAL_EP=MAX_GLOBAL_EP)
A3C_interrupt.append(with_A3C)
GLOBAL_EP += 1
break
# if TRAIN:
# # stop training once the last 20 episodes all reach near-maximum reward
# # flag = GLOBAL_RUNNING_R[-20:]
# # if (np.array(GLOBAL_RUNNING_R[-60:]) > 8000).all():
# # break
while not TRAIN:
if TEST_RENDER:
self.env.render()
s_nexis = s[np.newaxis, :]
if mixed_version is True:
# # mixed version
prob = clf2.predict_proba(s_nexis)
if np.max(prob) > 0.95:
a = np.squeeze(clf2.predict(s_nexis))
else:
a = self.AC.choose_action(s)
with_A3C += 1
# # simple version
else:
a = np.squeeze(clf2.predict(s_nexis))
s_, r, done, info = self.env.step(a)
ep_r += r
s = s_
total_step += 1
j += 1
if done or j > MAX_ROUND_EP:
GLOBAL_RUNNING_STEP.append(j)
GLOBAL_RUNNING_R.append(ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
print_time_information(
start, GLOBAL_EP=GLOBAL_EP, MAX_GLOBAL_EP=MAX_GLOBAL_EP)
print(with_A3C)
A3C_interrupt.append(with_A3C)
GLOBAL_EP += 1
break
def main():
if TRAIN:
with tf.device("/cpu:0"):
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
SESS.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
# loop main
for worker in workers:
def job(): return worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
save_path = saver.save(SESS, path + "/save_net.ckpt")
print("model has saved in", save_path)
plt_reward_step(
GLOBAL_RUNNING_R=GLOBAL_RUNNING_R,
GLOBAL_RUNNING_STEP=GLOBAL_RUNNING_STEP,
title=GAME)
average_A3C = np.mean(A3C_interrupt)
print('average_A3C', average_A3C)
else:
with tf.device("/cpu:0"):
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(1):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
saver = tf.train.Saver()
saver.restore(SESS, path + "/save_net.ckpt")
# SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
def job(): return worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt_reward_step(
GLOBAL_RUNNING_R=GLOBAL_RUNNING_R,
GLOBAL_RUNNING_STEP=GLOBAL_RUNNING_STEP,
title=GAME)
sum_A3C = np.sum(A3C_interrupt)
print('sum_A3C', sum_A3C)
if __name__ == "__main__":
SESS = tf.Session()
COORD = tf.train.Coordinator()  # set as a global variable
path = "A3C/" + GAME
if not os.path.exists(path):
os.makedirs(path)
main()
sum_A3C = np.sum(A3C_interrupt)
print('sum_A3C', sum_A3C)
sum_step = np.sum(GLOBAL_RUNNING_STEP)
print('avg_interrupt', sum_A3C * 100 / sum_step)
total_time = time.time() - start
print('avg_time', total_time * 100 / sum_step)
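# The v_target computation inside Worker.work() walks buffer_r in reverse and
# bootstraps from the critic's estimate of the last state (0 on termination).
# A standalone, hedged sketch of that calculation; the function name and the
# example numbers are ours, not part of the training script:
def discounted_targets(rewards, bootstrap_value, gamma=GAMMA):
    targets = []
    v = bootstrap_value
    for r in reversed(rewards):  # same reverse walk as buffer_r[::-1] above
        v = r + gamma * v
        targets.append(v)
    targets.reverse()
    return targets
# Example: discounted_targets([1.0, 1.0, 1.0], 0.0, gamma=0.9)
# returns [2.71, 1.9, 1.0] (up to float rounding).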
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
from platform import python_implementation
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark.sql import SQLContext, IntegerType, Row
from pyspark import shuffle
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class TestMerger(unittest.TestCase):
def setUp(self):
self.N = 1 << 16
self.l = [i for i in xrange(self.N)]
self.data = zip(self.l, self.l)
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
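# (list.append/list.extend return None, so the `... or x` idiom mutates the
# combiner in place and still returns the list for the next merge step.)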
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
self.N * 10)
m._cleanup()
class TestSorter(unittest.TestCase):
def test_in_memory_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = range(10240)
random.shuffle(l)
rdd = sc.parallelize(l, 10)
self.assertEquals(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from cPickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEquals(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEquals(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.func_code.co_names)
ser.dumps(foo)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name, batchSize=2)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class TestCheckpoint(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class TestAddFile(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
log4j = self.sc._jvm.org.apache.log4j
old_level = log4j.LogManager.getRootLogger().getLevel()
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
log4j.LogManager.getRootLogger().setLevel(old_level)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class TestRDDFunctions(PySparkTestCase):
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.sc.stop()
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
self.sc = SparkContext("local")
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize(["Hello", "World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
def testAggregateByKey(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEquals([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 100000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 270MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEquals(N, m)
def test_large_closure(self):
N = 1000000
data = [float(i) for i in xrange(N)]
m = self.sc.parallelize(range(1), 1).map(lambda x: len(data)).sum()
self.assertEquals(N, m)
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEquals(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.04) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.5))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals([4], rdd.histogram([0, 10])[1])
self.assertEquals([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEquals([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEquals([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEquals([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEquals(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEquals(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEquals((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEquals([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
# mixed RDD
rdd = self.sc.parallelize([1, 4, "ab", "ac", "b"], 2)
self.assertEquals([1, 1], rdd.histogram([0, 4, 10])[1])
self.assertEquals([2, 1], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals(([1, "b"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEquals(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEquals(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEquals(rdd.getNumPartitions(), 10)
self.assertEquals(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
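# Basic SQLContext tests: UDF registration, broadcast variables inside UDFs, and SchemaRDD operations.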
class TestSQL(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.sqlCtx = SQLContext(self.sc)
def test_udf(self):
self.sqlCtx.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.sqlCtx.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.sqlCtx.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.sqlCtx.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.sqlCtx.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
srdd = self.sqlCtx.jsonRDD(rdd)
srdd.count()
srdd.collect()
srdd.schemaString()
srdd.schema()
# cache and checkpoint
self.assertFalse(srdd.is_cached)
srdd.persist()
srdd.unpersist()
srdd.cache()
self.assertTrue(srdd.is_cached)
self.assertFalse(srdd.isCheckpointed())
self.assertEqual(None, srdd.getCheckpointFile())
srdd = srdd.coalesce(2, True)
srdd = srdd.repartition(3)
srdd = srdd.distinct()
srdd.intersection(srdd)
self.assertEqual(2, srdd.count())
srdd.registerTempTable("temp")
srdd = self.sqlCtx.sql("select foo from temp")
srdd.count()
srdd.collect()
def test_distinct(self):
rdd = self.sc.parallelize(['{"a": 1}', '{"b": 2}', '{"c": 3}']*10, 10)
srdd = self.sqlCtx.jsonRDD(rdd)
self.assertEquals(srdd.getNumPartitions(), 10)
self.assertEquals(srdd.distinct().count(), 3)
result = srdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
def test_apply_schema_to_row(self):
srdd = self.sqlCtx.jsonRDD(self.sc.parallelize(["""{"a":2}"""]))
srdd2 = self.sqlCtx.applySchema(srdd.map(lambda x: x), srdd.schema())
self.assertEqual(srdd.collect(), srdd2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
srdd3 = self.sqlCtx.applySchema(rdd, srdd.schema())
self.assertEqual(10, srdd3.count())
class TestIO(PySparkTestCase):
def test_stdout_redirection(self):
import subprocess
def func(x):
subprocess.check_call('ls', shell=True)
self.sc.parallelize([1]).foreach(func)
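# Reading SequenceFiles and old/new-API Hadoop input formats into Python, including custom key/value converters.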
class TestInputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
self.sc._jvm.WriteInputFormatTestDataGenerator.generateData(self.tempdir.name, self.sc._jsc)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name)
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.assertEqual(maps, em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
ec = (u'1',
{u'__class__': u'org.apache.spark.api.python.TestWritable',
u'double': 54.0, u'int': 123, u'str': u'test1'})
self.assertEqual(clazz[0], ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
batchSize=1).collect())
self.assertEqual(unbatched_clazz[0], ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
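# Writing RDDs back out via saveAsSequenceFile and the old/new-API Hadoop file and dataset writers.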
class TestOutputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = sorted(self.sc.sequenceFile(basepath + "/sfmap/").collect())
self.assertEqual(maps, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = sorted(self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
self.assertEqual(result, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
old_dataset = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect())
self.assertEqual(old_dataset, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = zip(x, y)
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_unbatched_save_and_read(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei, len(ei)).saveAsSequenceFile(
basepath + "/unbatched/")
unbatched_sequence = sorted(self.sc.sequenceFile(
basepath + "/unbatched/",
batchSize=1).collect())
self.assertEqual(unbatched_sequence, ei)
unbatched_hadoopFile = sorted(self.sc.hadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_hadoopFile, ei)
unbatched_newAPIHadoopFile = sorted(self.sc.newAPIHadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopFile, ei)
oldconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_hadoopRDD = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=oldconf,
batchSize=1).collect())
self.assertEqual(unbatched_hadoopRDD, ei)
newconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_newAPIHadoopRDD = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=newconf,
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopRDD, ei)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class TestDaemon(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shutdown the worker
sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
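# Worker lifecycle tests: job cancellation, file-descriptor usage, recovery after Python/JVM errors, accumulators.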
class TestWorker(PySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
self.sc.parallelize(range(1)).foreach(sleep)
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
data = open(path).read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_fd_leak(self):
N = 1100 # fd limit is 1024 by default
rdd = self.sc.parallelize(range(N), N)
self.assertEquals(N, rdd.count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(range(100), 1)
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
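# End-to-end spark-submit tests for standalone scripts and --py-files module dependencies, locally and on a cluster.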
class TestSparkSubmit(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
class ContextStopTests(unittest.TestCase):
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = map(gammaln, x)
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print "NOTE: Skipping SciPy tests as it does not seem to be installed"
if not _have_numpy:
print "NOTE: Skipping NumPy tests as it does not seem to be installed"
unittest.main()
if not _have_scipy:
print "NOTE: SciPy tests were skipped as it does not seem to be installed"
if not _have_numpy:
print "NOTE: NumPy tests were skipped as it does not seem to be installed"
|
uno_server.py
|
import uno_module as uno
import os
import uno_ai as ai
import socket
from _thread import *
import threading
import config
import sys
# local declarations
currentId = "0"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = 'localhost'
port = 5555
server_ip = socket.gethostbyname(server)
try:
s.bind((server, port))
except socket.error as e:
print(str(e))
roomSize = int(sys.argv[1])
s.listen(roomSize)
print("Waiting for a connection")
def parse(message):
'''
decodes messages from clients
'''
id = message.split(":")[0]
choice = message.split(":")[1].split(",")[0]
colour = message.split(":")[1].split(",")[1]
return choice, colour
def encode(dct):
'''
converts dct into sendable string
'''
string = str(dct['topCard']) + ":" + str(dct['hand']) + ":" + str(dct['eventID']) + ":" + str(dct['drawnCards']) + ":" + str(dct['playerTurn']) + ":" + str(dct['winner']) + ":" + str(dct['chosenColour'])
return string
def run_once(f):
'''
lets a function run only once
'''
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
return f(*args, **kwargs)
wrapper.has_run = False
return wrapper
def database(name='0', eventID=0, drawnCards='00'):
'''
stores data for all clients to take from
'''
dct = {}
for i in range(len(config.myPlayers)):
playerTurn = 0
if name == config.myPlayers[i]['name']:
playerTurn = 1
else:
eventID = 0
drawnCards = '00'
dct[config.myPlayers[i]['name']] = {'topCard': config.myDiscard_pile.stack[-1].conv(),
'hand':config.myPlayers[i]['hand'].conv(),
'eventID': eventID,
'playerTurn': playerTurn,
'drawnCards': drawnCards,
'winner': config.Winner,
'chosenColour': config.assumedColour
}
return dct
@run_once
def prepare():
'''
prepares game state before starting the server
'''
print("preparing")
for i in range(roomSize):
sample_dict = {'name': 0, 'hand': 0}
sample_dict['name'] = config.myPlayerList[i]
stack = uno.Stack()
stack.add(config.myDeck.deal(7))
sample_dict['hand'] = stack
config.myPlayers.append(sample_dict)
print("config.myPlayers ready")
# doesn't allow +4 on first turn
while True:
if config.myDeck.deck[0].card['colour'] == 'X':
config.myDeck.shuffle()
else:
break
print("wild clause")
# creating config.myReplies
for i in config.myPlayerList:
sample_dict = {'choice':'0', 'colour':'0'}
config.myReplies[i] = sample_dict
print("config.myReplies ready")
config.myStorage = database()
config.myPreparations_complete = True
print("preparations complete")
def Deal(n):
'''
deals cards to players from deck
'''
dealt_cards = []
rem = n
if len(config.myDeck.deck) < n:
dealt_cards += config.myDeck.deck
rem = n - len(dealt_cards)
config.myDeck.deck = list(config.myDiscard_pile.stack)
config.myDiscard_pile.stack.clear()
config.myDiscard_pile.stack.append(config.myDeck.deck.pop())
config.myDeck.shuffle()
return config.myDeck.deal(rem) + dealt_cards
def threaded_server():
'''
starts server in a separate thread
'''
while True:
if roomSize == len(config.myPlayerList):
prepare()
else:
continue
print(f"it's {config.myPlayers[0]['name']} turn")
if config.actionEffect == False:
copied_hand = uno.Stack()
lst = list(config.myPlayers[0]['hand'].stack)
copied_hand.add(lst)
playable_cards = uno.isPlayable(config.myDiscard_pile.stack[-1], copied_hand, config.assumedColour)
if playable_cards != 'None':
print(f"{config.myPlayers[0]['name']} has playable cards")
config.myStorage = database(config.myPlayers[0]['name'], 1)
while True:
# trapping until input is received
choice = int(config.myReplies[config.myPlayers[0]['name']]['choice'])
if choice != 0:
print(f"{config.myPlayers[0]['name']} HAS CHOSEN")
config.assumedColour = config.myReplies[config.myPlayers[0]['name']]['colour']
played_card = choice-1
config.myDiscard_pile.add(playable_cards.deal(0, played_card))
copied_hand.add(playable_cards.stack)
config.myPlayers[0]['hand'].clear()
config.myPlayers[0]['hand'].add(copied_hand.stack) # at this point we have successfully played the card
print(f"remaining hand: {config.myPlayers[0]['hand'].show()}")
print(f"hand length = {len(config.myPlayers[0]['hand'].stack)}")
if uno.isAction(config.myDiscard_pile.stack[-1]) != 'None' and config.myDiscard_pile.stack[-1].card['val'] != 'wild':
config.actionEffect = True
if len(config.myPlayers[0]['hand'].stack) == 0:
print(f"Winner is {config.myPlayers[0]['name']}!!")
config.Winner = str(config.myPlayers[0]['name'])
config.myPlayers[0]['hand'].add(config.myDiscard_pile.deal(0,0))
break
else:
# no cards situation
print(f"{config.myPlayers[0]['name']} has NO playable cards")
config.myPlayers[0]['hand'].add(Deal(1))
config.myStorage = database(config.myPlayers[0]['name'], 2, config.myPlayers[0]['hand'].stack[-1].conv())
while True:
# trapping until input is received
colour = config.myReplies[config.myPlayers[0]['name']]['colour']
if colour =='N': # N = Nil[client has received data]
print(f"{config.myPlayers[0]['name']} HAS RECEIVED")
break
if uno.isAction(config.myDiscard_pile.stack[-1]) == 'rev':
config.actionEffect = False
config.myPlayers.reverse()
continue
config.myPlayers.append(config.myPlayers.pop(0))
else:
print(f"{config.myPlayers[0]['name']} is facing an action situation")
# write action code
if uno.isAction(config.myDiscard_pile.stack[-1]) == 'skp':
config.actionEffect = False
config.myStorage = database(config.myPlayers[0]['name'], 5)
while True:
# trapping until input is received
colour = config.myReplies[config.myPlayers[0]['name']]['colour']
if colour =='N': # N = Nil[client has received data]
print(f"{config.myPlayers[0]['name']} HAS RECEIVED")
break
config.myPlayers.append(config.myPlayers.pop(0))
elif uno.isAction(config.myDiscard_pile.stack[-1]) == '+2':
config.actionEffect = False
config.myPlayers[0]['hand'].add(Deal(2))
drawn_cards = config.myPlayers[0]['hand'].stack[-1].conv() + "," + config.myPlayers[0]['hand'].stack[-2].conv()
config.myStorage = database(config.myPlayers[0]['name'], 3, drawn_cards)
while True:
# trapping until input is received
colour = config.myReplies[config.myPlayers[0]['name']]['colour']
if colour =='N': # N = Nil[client has received data]
print(f"{config.myPlayers[0]['name']} HAS RECEIVED")
break
config.myPlayers.append(config.myPlayers.pop(0))
elif uno.isAction(config.myDiscard_pile.stack[-1]) == '+4':
config.actionEffect = False
config.myPlayers[0]['hand'].add(Deal(4))
drawn_cards = config.myPlayers[0]['hand'].stack[-1].conv() + "," + config.myPlayers[0]['hand'].stack[-2].conv() + "," + config.myPlayers[0]['hand'].stack[-3].conv() + "," + config.myPlayers[0]['hand'].stack[-4].conv()
config.myStorage = database(config.myPlayers[0]['name'], 3, drawn_cards)
while True:
# trapping until input is received
colour = config.myReplies[config.myPlayers[0]['name']]['colour']
if colour =='N': # N = Nil[client has received data]
print(f"{config.myPlayers[0]['name']} HAS RECEIVED")
break
config.myPlayers.append(config.myPlayers.pop(0))
th = threading.Thread(target=threaded_server)
th.start()
def threaded_client(conn):
'''
runs new threads for new clients
'''
global currentId
conn.send(str.encode(currentId))
currentId = str(int(currentId) + 1)
reply = ''
name = f"player-{currentId}"
while True:
try:
data = conn.recv(2048)
reply = data.decode('utf-8')
if not data:
conn.send(str.encode("Goodbye"))
break
else:
if ":" in reply: # if client requests game data
print(name)
config.myReplies[name]['choice'], config.myReplies[name]['colour'] = parse(reply)
if config.myReplies[name]['choice'] != '0':
config.myStorage[name]['playerTurn'] = '0'
config.myStorage[name]['eventID'] = '0'
reply = encode(config.myStorage[name])
print(f"Latest reply = {reply}")
else:
reply = encode(config.myStorage[name])
else: # if client requests config.myPlayerList
config.myPlayerList.append(reply)
name = reply
while True: # trap until all Players join
if (len(config.myPlayerList) == roomSize) and (config.myPreparations_complete == True):
reply = ",".join(config.myPlayerList)
print(f"Sending to : " + reply)
break
else:
print(f"config.myPlayers joined({len(config.myPlayerList)}/{roomSize})")
print("Waiting...")
print(f"Sending to {name}: {reply}")
conn.sendall(str.encode(reply))
except:
print("break from threaded_client")
raise
print("Connection Closed")
conn.close()
while True:
'''
manages new players
'''
conn, addr = s.accept()
print("Connected to: ", addr)
start_new_thread(threaded_client, (conn,))
|
main.py
|
#!/bin/python2
import commands
import fcntl
import json
import random
import re
import socket
import struct
import urllib2 as urllib
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from cv2 import imshow, namedWindow, setWindowProperty
from datetime import timedelta
from platform import machine
from threading import Thread
from time import sleep, time
import os
import cv2
import numpy as np
from btns import desk, Button
from log import Log
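# Read the system uptime from /proc/uptime and return it formatted as H:MM:SS.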
def uptime():
with open('/proc/uptime', 'r') as f:
uptime_seconds = int(float(f.readline().split()[0]))
uptime_string = str(timedelta(seconds=uptime_seconds))
return uptime_string
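# Return the IPv4 address of the given interface via the SIOCGIFADDR ioctl (0x8915), or None if it cannot be read.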
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', ifname[:15]))[20:24])
except:
addr = None
return addr
STAGE = 0
LANCAM = list()
WindowName = 'Term'
FULLSCREEN = True
RUN = True
__version__ = 0.6
log = None
ip = get_ip_address('eth0' if machine() == 'armv7l' else 'wlp3s0')
log = Log(ip)
cam = list()
btncss = '<head><script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>\n<style type="text/css">a.button {-webkit-appearance: button;\n-moz-appearance: button;\nappearance: button;\ntext-decoration: none;\ncolor: initial;}\n</style></head>'
server = None
with open('settings.json') as json_data:
settings = json.load(json_data)
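# Resolve the stable /dev/v4l/by-path entry from settings['cam'] to its numeric /dev/videoN index.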
def get_cam(num):
path = '/dev/v4l/by-path/'+settings['cam'][num]
real_path = os.readlink(path)
num = re.findall(r'video(\d+)', real_path)[0]
return int(num)
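# Send an HTTP GET to another node, logging the request and retrying once on a non-200 response.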
def geturl(text, retry=True):
log.debug("SEND: " + str(text))
try:
url = urllib.urlopen(text)
except Exception as e:
log.error(e)
return e
ret = url.getcode()
if ret != 200:
if retry:
log.warning('RET: ' + str(ret))
geturl(text, False)
else:
log.error('RET: ' + str(ret))
url.close()
return ret
def htmlButton(text, href, js=False):
if js:
href = 'javascript: $.get(\'%s\', function( data ) {console.log(data);})' % href
return '<a href="%s" class="button" style="margin: 5px; padding: 5px">%s</a>' % (href, text)
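# HTTP handler serving the game state as JSON, the log file, MJPEG camera streams and the control page.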
class CamHandler(BaseHTTPRequestHandler):
streams = None
def log_message(self, format, *args):
pass
def do_GET(self):
path = self.path.split('/')[1:]
data = path[-1]
name = data
args = dict()
end = str()
server = self.client_address[0]
if self.path not in ['/0', '/favicon.ico', '/game'] and 'stats' not in self.path:
log.debug(str(server) + " -> " + self.path)
if '?' in data:
name, args = data.split('?')
args = args.split('&')
ar = dict()
for i in range(len(args)):
r = args[i].split('=')
if len(r) == 2:
ar.update({r[0]: r[1]})
else:
ar.update({r[0]: str()})
args = ar
if '.' in name:
name, end = name.split('.')
if name == 'execute_1':
# game.setServer(server)
param_1 = int(args.get('param_1', ''))
game.start(param_1)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write('{"success": 1}')
if name == '0' and end == '':
game.setServer(server)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write('{"state_int_1": %i, "state_int_2": %i}' % (game.stage, game.round))
if name == 'game':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write('{"stage": %i, "round": %i, "uptime": "%s"}' % (game.stage, game.round, uptime()))
if name == 'log':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><body><pre>')
with open('log.log') as f:
for i in f:
self.wfile.write(str(i))
self.wfile.write('</pre></body></html>')
if name == 'reboot':
a = commands.getoutput('sudo reboot')
if end == 'mjpg':
try:
self.send_response(200)
self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')
self.send_header('Connection', 'keep-alive')
self.end_headers()
except Exception as e:
log.error(str(server) + ' -> ' + str(e))
return
while RUN and self.connection._sock != None:
try:
num = int(name)
img = str(self.streams[num])
if len(img) == 0:
continue
try:
data = b'--jpgboundary\r\n'
data += b'Content-type: image/jpeg\r\n'
data += b'Content-length: %i\r\n' % len(img)
data += b'\r\n'
data += img
data += b'\r\n'
self.connection._sock.send(data)
except Exception as e:
log.error(str(server) + ' -> ' + str(e))
break
except KeyboardInterrupt:
break
return
if self.path == '/':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html>')
self.wfile.write(btncss)
self.wfile.write('<body>')
# self.wfile.write('stage: %i</br>round: %i</br>' % (game.stage, game.round))
self.wfile.write('stage: <a id="stage"></a></br>')
self.wfile.write('round: <a id="round"></a></br>')
self.wfile.write('uptime: <a id="uptime"></a></br>')
self.wfile.write('<div>')
for i in range(2):
self.wfile.write('<div style="%s">' % ('display: inline-block;'))
self.wfile.write('<a><img id="img%(id)i" src="/%(id)i.mjpg"/></a>' % {'id': i})
self.wfile.write('</br><a id="stats%s"></a>' % i)
self.wfile.write('</br><a id="lanstats%s"></a>' % i)
self.wfile.write('</div>')
self.wfile.write('</div>')
for i in settings.get(ip, []):
self.wfile.write(htmlButton(i[0], '//' + str(i[0]) + ':8080/'))
self.wfile.write('</br>')
self.wfile.write(htmlButton('Off', '/execute_1?param_1=0', True))
for i in range(3):
self.wfile.write(htmlButton('Level %i' % (i + 1), '/execute_1?param_1=%i' % (i + 1), True))
self.wfile.write('</br>')
self.wfile.write(htmlButton('Log', '/log'))
self.wfile.write('</br>')
self.wfile.write(htmlButton('Reboot', '/reboot', True) + '</br>')
ajax = '''<script>setInterval(function(){$.ajax({
url: "/game",
success: function(result) {
$("#stage").html(result['stage']);
$("#round").html(result['round']);
$("#uptime").html(result['uptime']);
}
})}, 500)</script>'''
self.wfile.write(ajax)
for i in range(2):
ajax = '''<script>setInterval(function(){$.ajax({
url: "/%(id)i.stats",
success: function(result) {
$("#stats%(id)i").html(result["status"]+" "+result["time"]);
}
})}, 500)</script>''' % {'id': i}
self.wfile.write(ajax)
for i in range(2):
ajax = '''<script>setInterval(function(){$.ajax({
url: "/%(id)i.stats",
data: "lan",
success: function(result) {
$("#lanstats%(id)i").html(result["status"]+" "+result["time"]);
}
})}, 500)</script>''' % {'id': i}
self.wfile.write(ajax)
self.wfile.write('</body></html>')
if name.isdigit():
num = int(name)
if end == 'stats':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
if 'lan' in args:
tm = time() - LANCAM[num].time
self.wfile.write('{"status":"%s",' % LANCAM[num].state)
self.wfile.write('"time": "%s"}' % str(round(tm, 4)))
else:
tm = time() - self.streams[num].time
self.wfile.write('{"status":"%s",' % self.streams[num].state)
self.wfile.write('"time": "%s"}' % str(round(tm, 4)))
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
class VideoStream:
frame = None
grabbed = None
stream = None
stopped = True
paused = False
src = None
net = None
def __init__(self, src=0):
self.state = 'creating'
if type(src) == int:
self.src = src
log.info('Creating camera %s' % src)
self.stream = cv2.VideoCapture(get_cam(src))
self.stream.set(3, 320)
self.stream.set(4, 240)
try:
# self.grabbed, self.frame = self.stream.read()
self.grabbed = self.stream.grab()
except Exception as e:
log.error('Camera %s error: %s' % (self.src, e))
self.stopped = False
else:
pattern = r"^http:\/\/(?P<ip>[0-9.]+):(?P<port>[0-9]+)\/(?P<fn>.+)\.(?P<ft>.+)$"
self.net = re.search(pattern, src).groupdict()
self.src = str(src)
self.stopped = False
self.paused = True
self.time = time()
self.th = Thread(target=self.update)
self.th.start()
def netconn(self):
stream = None
while stream is None:
try:
stream = urllib.urlopen(self.src, timeout=1)
except Exception as e:
log.error(self.src + ': ' + str(e))
sleep(1)
return stream
def update(self):
try:
if type(self.src) == str:
stream = None
self.data = bytes()
self.state = 'starting'
while RUN:
if self.stopped:
log.warning('cam %s stopping' % str(self.src))
self.state = 'stopped'
return
if self.paused:
self.state = 'paused'
continue
if stream is None:
self.state = 'wait for stream'
stream = self.netconn()
if len(self.data) == 0:
while self.data != b'-':
self.state = 'wait for "-"'
try:
self.data = stream.read(1)
except Exception as e:
log.error(e)
self.data = bytes()
stream.close()
stream = None
continue
try:
self.state = 'read stream %i' % len(self.data)
self.data += stream.read(1)
except Exception as e:
log.error(e)
self.data = bytes()
stream.close()
stream = None
continue
b = self.data.find(b'\r\n\r\n')
if b == -1:
if len(self.data) > 1000:
self.state = 'reset'
self.data = bytes()
continue
a = self.data.find(b'--')
if a != -1 and b != -1:
head = self.data[a:b].split('\r\n')
self.data = bytes()
for i in head:
if 'length' in i:
l = int(i[i.find(': ') + 2:])
self.jpg = bytes()
while len(self.jpg) < l:
try:
self.state = 'read jpg'
self.jpg += stream.read(l - len(self.jpg))
except Exception as e:
log.error(str(e))
stream.close()
stream = None
self.jpg = bytes()
break
if len(self.jpg) == l:
try:
self.state = 'parse jpg'
img = cv2.imdecode(np.fromstring(self.jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
self.frame = img
self.time = time()
except Exception as e:
log.error(e)
else:
while RUN:
if self.stopped:
self.state = 'stopped'
log.warning('cam %s stopping' % str(self.src))
return
if self.paused:
self.state = 'paused'
continue
try:
self.grabbed = self.stream.grab()
self.state = 'get frame'
_, self.frame = self.stream.retrieve()
self.time = time()
except Exception as e:
log.error('Camera %s error: %s' % (self.src, e))
except Exception as e:
log.critical(str(e))
def read(self):
if self.paused:
log.debug('cam %s -> start' % str(self.src))
self.paused = False
if not self.th.isAlive():
log.critical('Thread %s not alive, restarting...' % self.src)
self.th = Thread(target=self.update)
self.th.start()
img = self.frame
return img
def pause(self):
self.paused = True
log.debug('cam %s -> pause' % str(self.src))
return self
def start(self):
self.paused = False
log.debug('cam %s -> start' % str(self.src))
return self
def stop(self):
self.stopped = True
log.debug('cam %s -> stop' % str(self.src))
return self
def __del__(self):
self.stop()
self.stream.release()
def __str__(self):
f = self.read()
if f is not None:
return cv2.imencode(".png", self.frame)[1].tostring()
return ''
def getImg(c):
for i in c:
d = i.read()
yield d
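# Stitch the given frames side by side into a single image and resize it to settings['size'].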
def comp(*img):
img = list(img)
img = filter(lambda x: x is not None, img)
if len(img) == 0:
vis = np.zeros((1, 1), np.uint8)
frame = cv2.resize(vis, tuple(settings['size']))
return frame
h = [i.shape[0] for i in img]
w = [i.shape[1] for i in img]
sz = (max(h), sum(w), 3)
vis = np.zeros(sz, np.uint8)
for i in range(len(img)):
vis[:h[i], sum(w[:i]):sum(w[:(i + 1)])] = img[i]
frame = cv2.resize(vis, tuple(settings['size']))
return frame
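# Game logic: tracks stage and round, picks random target buttons and advances or resets rounds on button presses.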
class Game:
stage = 0
round = 0
btns = list()
server = '127.0.0.1'
def __init__(self):
self.stage = 0
self.round = 0
Button.callback = self.clicked
def setServer(self, s):
if self.server != s and s != '127.0.0.1' and s != ip:
self.server = s
log.info('New server: ' + str(s))
def start(self, num):
if self.stage == num:
return None
if num != 3:
for i in LANCAM:
i.pause()
if num != 0:
log.info('Starting round ' + str(num))
self.getRandBtns()
for i in cam:
i.paused = False
else:
log.info('Stop game')
desk.leds(False)
self.btns = list()
for i in cam:
i.paused = True
self.round = 0
self.stage = num
def getRandBtns(self):
desk.leds(False)
for i in desk.L:
i.clicked = False
for i in desk.R:
i.clicked = False
self.btns = list([random.choice(desk.L), random.choice(desk.R)])
for i in self.btns:
i.led(True)
log.info('Random buttons: ' + str([str(i) for i in self.btns]))
def endStage(self):
log.info('End stage: ' + str(self.stage))
geturl('http://%s:3000/events/0/event_1?param_1=%i' % (self.server, self.stage))
if self.stage != 3:
self.round = 0
self.stage = 0
def nextRound(self):
endRound = {1: 3, 2: 2, 3: 1}
log.info('End round: %i/%i' % (self.round + 1, endRound[self.stage] + 1))
if self.round == endRound[self.stage]:
self.endStage()
return
else:
self.round += 1
self.getRandBtns()
def resetRound(self):
log.info('Reset round')
self.round = 0
self.getRandBtns()
def clicked(self, btn):
if self.stage == 0:
return
log.info('Click: ' + str(btn))
if btn not in self.btns:
self.resetRound()
else:
btn.led(False)
if self.btns[0].clicked and self.btns[1].clicked:
self.nextRound()
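# Rough usage sketch of Game (illustrative; desk, cam and LANCAM are the
# globals created in __main__ below):
#   game.start(1)       # stage 1: light a random left/right button pair, unpause local cams
#   game.clicked(btn)   # wrong button -> resetRound(); both lit buttons hit -> nextRound()
#   game.start(0)       # stop: leds off, buttons cleared, local cameras paused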
def createFrame():
frames = list()
if game.stage == 1:
frames = getImg(cam)
elif game.stage == 2:
a = cam[::-1]
frames = getImg(a)
elif game.stage == 3:
for i in LANCAM:
if i.paused:
i.start()
frames = getImg(LANCAM)
frame = comp(*frames)
return frame
def window(*cam):
global STAGE
global RUN
while RUN:
frame = createFrame()
if frame is not None:
imshow(WindowName, frame)
key = cv2.waitKey(1)
if key & 0xFF == 27:
RUN = False
break
if key & 0xFF == 32:
STAGE = (STAGE + 1) % 3
for i in cam:
i.stop()
cv2.destroyAllWindows()
exit(-1)
def serve():
global server
CamHandler.streams = cam
server = ThreadedHTTPServer(('', settings['port']), CamHandler)
log.info("Server started")
server.serve_forever()
game = Game()
if __name__ == '__main__':
WindowName = str(ip)
other = settings.get(ip, [])
log.info('Version: %s' % __version__)
log.info('OpenCV: %s' % cv2.__version__)
log.info('My addr: %s' % ip)
log.info('Other: %s' % other)
cam = list([VideoStream(0).start(), VideoStream(1).start()])
if FULLSCREEN:
namedWindow(WindowName, cv2.WND_PROP_FULLSCREEN)
if cv2.__version__.startswith('2.'):
setWindowProperty(WindowName, cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
if cv2.__version__.startswith('3.'):
setWindowProperty(WindowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
th = Thread(target=window, args=tuple(cam))  # each camera becomes a separate positional arg of window()
th.start()
th1 = Thread(target=serve, args=())
th1.start()
LANCAM = list()
for i in other:
url = 'http://' + str(i[0]) + ':' + str(settings['port']) + '/' + str(i[1]) + '.mjpg'
d = VideoStream(url)
log.info('NetCam created: ' + url)
LANCAM.append(d)
while RUN:
pass
log.info('Exit')
server.shutdown()
th1.join(1)
th.join(1)
|
log_battery.py
|
# log_battery.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:45 PM
"""Example to continuously read the battery (with no Wifi connection)"""
import csv
import time
import logging
import argparse
import threading
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass
from typing import Optional, Tuple, Literal, List
from rich.console import Console
from open_gopro import GoPro
from open_gopro.constants import StatusId
from open_gopro.util import setup_logging, set_logging_level
logger = logging.getLogger(__name__)
console = Console() # rich consoler printer
BarsType = Literal[0, 1, 2, 3]
@dataclass
class Sample:
"""Simple class to store battery samples"""
index: int
percentage: int
bars: BarsType
def __post_init__(self) -> None:
self.time = datetime.now()
def __str__(self) -> str: # pylint: disable=missing-return-doc
return f"Index {self.index} @ time {self.time.strftime('%H:%M:%S')} --> bars: {self.bars}, percentage: {self.percentage}"
SAMPLE_INDEX = 0
SAMPLES: List[Sample] = []
def dump_results_as_csv(location: Path) -> None:
"""Write all of the samples to a csv file
Args:
location (Path): File to write to
"""
console.print(f"Dumping results as CSV to {location}")
with open(location, mode="w") as f:
w = csv.writer(f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
w.writerow(["index", "time", "percentage", "bars"])
initial_time = SAMPLES[0].time
for s in SAMPLES:
w.writerow([s.index, (s.time - initial_time).seconds, s.percentage, s.bars])
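# The resulting CSV looks roughly like this (values hypothetical); the time
# column is seconds elapsed since the first sample:
#   index,time,percentage,bars
#   0,0,87,3
#   1,60,86,3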
def process_battery_notifications(gopro: GoPro, initial_bars: BarsType, initial_percentage: int) -> None:
"""Separate thread to continuously check for and store battery notifications.
If the CLI parameter was set to poll, this isn't used.
Args:
gopro (GoPro): instance to get updates from
initial_bars (BarsType): Initial bars level when notifications were enabled
initial_percentage (int): Initial percentage when notifications were enabled
"""
last_percentage = initial_percentage
last_bars = initial_bars
while True:
# Block until we receive an update
notification = gopro.get_update()
# Update data points if they have changed
last_percentage = (
notification.data[StatusId.INT_BATT_PER]
if StatusId.INT_BATT_PER in notification.data
else last_percentage
)
last_bars = (
notification.data[StatusId.BATT_LEVEL] if StatusId.BATT_LEVEL in notification.data else last_bars
)
# Append and print sample
global SAMPLE_INDEX
SAMPLES.append(Sample(index=SAMPLE_INDEX, percentage=last_percentage, bars=last_bars))
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
def main() -> int:
"""Main program functionality
Returns:
int: program return code
"""
identifier, log_location, poll = parse_arguments()
global logger
logger = setup_logging(logger, log_location)
global SAMPLE_INDEX
gopro: Optional[GoPro] = None
return_code = 0
try:
with GoPro(identifier, enable_wifi=False) as gopro:
set_logging_level(logger, logging.ERROR)
# Setup notifications if we are not polling
if poll is None:
console.print("Configuring battery notifications...")
# Enable notifications of the relevant battery statuses. Also store initial values.
bars = gopro.ble_status.batt_level.register_value_update().flatten
percentage = gopro.ble_status.int_batt_per.register_value_update().flatten
# Start a thread to handle asynchronous battery level notifications
threading.Thread(
target=process_battery_notifications, args=(gopro, bars, percentage), daemon=True
).start()
with console.status("[bold green]Receiving battery notifications until it dies..."):
# Sleep forever, allowing notification handler thread to deal with battery level notifications
while True:
time.sleep(1)
# Otherwise, poll
else:
with console.status("[bold green]Polling the battery until it dies..."):
while True:
SAMPLES.append(
Sample(
index=SAMPLE_INDEX,
percentage=gopro.ble_status.int_batt_per.get_value().flatten,
bars=gopro.ble_status.batt_level.get_value().flatten,
)
)
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
time.sleep(poll)
except Exception as e: # pylint: disable=broad-except
logger.error(repr(e))
return_code = 1
except KeyboardInterrupt:
logger.warning("Received keyboard interrupt. Shutting down...")
finally:
if len(SAMPLES) > 0:
csv_location = Path(log_location.parent) / "battery_results.csv"
dump_results_as_csv(csv_location)
if gopro is not None:
gopro.close()
console.print("Exiting...")
return return_code # pylint: disable=lost-exception
def parse_arguments() -> Tuple[str, Path, Optional[int]]:
"""Parse command line arguments
Returns:
Tuple[str, Path, Optional[int]]: (identifier, path to save log, polling interval in seconds or None)
"""
parser = argparse.ArgumentParser(
description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications)."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \
If not used, first discovered GoPro will be connected to",
default=None,
)
parser.add_argument(
"-l",
"--log",
type=Path,
help="Location to store detailed log",
default="log_battery.log",
)
parser.add_argument(
"-p",
"--poll",
type=int,
help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications.",
default=None,
)
args = parser.parse_args()
return args.identifier, args.log, args.poll
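# Example invocations (identifier and interval are hypothetical):
#   python log_battery.py                  # notification mode, first discovered GoPro
#   python log_battery.py -i 0456 -p 60    # poll camera ...0456 every 60 seconds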
if __name__ == "__main__":
main()
|
PropertyMonitor.py
|
# ------------------------------------------------------------------
# imports
# ------------------------------------------------------------------
from shared.utils import HMILog
from shared.utils import tel_ids, redis_port
import threading
import time
from collections import Counter
import redis
import json
import jsonAcs
# Import ACS
import ACS__POA
from ACS import CBDescIn
from Acspy.Clients.SimpleClient import PySimpleClient
from Acspy.Common.Callbacks import CBvoid
# from Acspy.Common.Callbacks import CBdouble
from TelescopeStruct import props, GLOBAL_FREQ, QUEUE_FREQ
desc = CBDescIn(5L, 0L, 0L)
# ------------------------------------------------------------------
# Threading and Callback classes
# ------------------------------------------------------------------
class PollingThread(threading.Thread):
"""
class that defines the thread used for polling
"""
def __init__(self, *args, **kwargs):
super(PollingThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
# methods used for stopping the thread
def stop(self):
self._stop.set()
class MonitorCB(ACS__POA.CBdouble):
"""
class that defines the callback for the acs components
"""
def __init__(self, key):
self.log = HMILog(title=__name__)
self.key = key
self.r = redis.StrictRedis(host='localhost', port=6379, db=0)
def done(self, completion, desc, a):
"""
this method is called when monitoring ends and it writes to redis
"""
# cache the value in redis with a 10 second TTL
self.r.setex(self.key, 10, json.dumps({'status': 'done', 'value': completion}))
self.log.info([['g', (" - PropertyMonitorLocal.done.%s - ") % (self.key)],
['p', completion]])
def working(self, completion, desc, a):
"""
this method is called when monitoring and it writes to redis
"""
self.r.setex(self.key, 10, json.dumps({'status': 'working', 'value': completion}))
self.log.info([['g', (" - PropertyMonitorLocal.working.%s - ") % (self.key)],
['p', completion]])
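# Both callbacks cache the latest monitored value in redis under the key passed
# to the constructor, with a 10 second TTL. A stored entry looks roughly like
# (key and number hypothetical):
#   "ACS:L:Monitor:TEST_JAVA_T1.someProp" -> '{"status": "working", "value": 42.0}'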
# Prefix constants
# global monitoring component prefix
COMPONENT_PREFIX_GLOBAL = 'ACS:G'
# local monitoring component prefix
COMPONENT_PREFIX_LOCAL = 'ACS:L'
# name of the queue used for monitoring
QUEUE_NAME = 'ACS:MonitorQueue'
# name of the queue used for polling
POLLING_QUEUE = 'ACS:PollingQueue'
# name of the queue used for querying from db
MONGO_QUEUE = 'ACS:MongoQueue'
# prefix of the dicts used for comparing the values in the db
DICT_SET_NAME = 'ACS:DictSet'
# the monitoring dictionary that contains the references of all active monitors
MONITOR_DICT = 'ACS:Monitor:Dict'
# Create a client and the ArraySupervisor component
client = PySimpleClient()
supervisor = client.getComponent("ArraySupervisor")
# Local property monitor dict
local_queue = Counter()
# dict used for storing active polling threads
polling_threads = dict()
# dict for storing references to the components
dict_of_components = dict()
# get the components
TEST_JAVA_T1 = client.getComponent("TEST_JAVA_T1")
TEST_JAVA_T2 = client.getComponent("TEST_JAVA_T2")
# you can add more here
# add the components to the dict
dict_of_components[TEST_JAVA_T1.name] = TEST_JAVA_T1
dict_of_components[TEST_JAVA_T2.name] = TEST_JAVA_T2
# ------------------------------------------------------------------
# PropertyMonitor classes
# ------------------------------------------------------------------
class PropertyMonitorQueue:
def __init__(self, site_type):
self.log = HMILog(title=__name__)
self.log.info([['y', " - PropertyMonitor Queue - "], ['g', site_type]])
self.site_type = site_type
self.tel_ids = tel_ids[site_type]
self.r = redis.StrictRedis(host='localhost', port=redis_port[site_type], db=0)
# to be on a safe side, clean the counter
self.r.delete(DICT_SET_NAME)
t = threading.Thread(target=self.queue_parser_loop)
# set the thread to daemon for quicker termination
t.daemon = True
t.start()
return
def queue_parser_loop(self):
"""
this method calls queue_parser repeatedly, sleeping QUEUE_FREQ (presumably given in 100 ns ACS time units) between iterations
"""
while True:
self.queue_parser()
time.sleep(QUEUE_FREQ / 10000000)
def queue_parser(self):
"""
This method drains the monitoring, polling and database queues in redis.
Each popped entry is parsed and the subscription counter of the
corresponding property is incremented (SUB) or decremented (UNSUB).
"""
#####
# Monitoring queue
#####
for _ in self.r.lrange(QUEUE_NAME, 0, 1000):
# Pop the element from queue and parse it
pop = self.r.rpop(QUEUE_NAME)
# split the pop'ed string
pop_command = pop.split(':', 1)[0]
pop_tel = pop.split(':', 1)[1]
pop_tel = "Monitor:" + pop_tel
# Depending on the prefix increment or decrement the counter
if pop_command == 'UNSUB':
self.log.info([['g', " - PropertyMonitorQueue.queue_parser.UNSUB - "],
['p', pop_tel]])
# Check if key value is lower than 0
if local_queue[pop_tel] <= 0:
local_queue.pop(pop_tel, None)
else:
local_queue[pop_tel] -= 1
else:
self.log.info([['g', " - PropertyMonitorQueue.queue_parser.SUB - "],
['p', pop_tel]])
local_queue[pop_tel] += 1
print local_queue
#####
# Polling queue
#####
for _ in self.r.lrange(POLLING_QUEUE, 0, 1000):
# Pop the element from queue and parse it
pop = self.r.rpop(POLLING_QUEUE)
pop_command = pop.split(':', 1)[0]
pop_tel = pop.split(':', 1)[1]
pop_tel = "Polling:" + pop_tel
# Depending on the prefix increment or decrement the counter
if pop_command == 'UNSUB':
# Check if key value is lower than 0
if local_queue[pop_tel] <= 0:
local_queue.pop(pop_tel, None)
else:
local_queue[pop_tel] -= 1
else:
local_queue[pop_tel] += 1
print local_queue
#####
# Database queue
#####
for _ in self.r.lrange(MONGO_QUEUE, 0, 1000):
# Pop the element from queue and parse it
pop = self.r.rpop(MONGO_QUEUE)
pop_command = pop.split(':', 1)[0]
pop_tel = pop.split(':', 1)[1]
pop_tel = "Mongo:" + pop_tel
# Depending on the prefix increment or decrement the counter
if pop_command == 'UNSUB':
# Check if key value is lower than 0
if local_queue[pop_tel] <= 0:
local_queue.pop(pop_tel, None)
else:
local_queue[pop_tel] -= 1
else:
local_queue[pop_tel] += 1
print local_queue
class PropertyMonitorGlobal:
def __init__(self, site_type):
self.log = HMILog(title=__name__)
self.log.info([['y', " - PropertyMonitor Global - "], ['g', site_type]])
self.site_type = site_type
self.tel_ids = tel_ids[site_type]
self.r = redis.StrictRedis(host='localhost', port=redis_port[site_type], db=0)
t = threading.Thread(target=self.tel_global_loop)
# set the thread to daemon for quicker termination
t.daemon = True
t.start()
return
def tel_global_loop(self):
"""
this method calls monitor_component_status repeatedly, sleeping GLOBAL_FREQ (presumably given in 100 ns ACS time units) between iterations
"""
while True:
self.monitor_component_status()
time.sleep(GLOBAL_FREQ / 10000000)
def monitor_component_status(self):
"""
This method reads the global properties of every registered component
and writes the values into redis.
"""
# for each component in the dict
for x in dict_of_components.keys():
# Build the component property dict
comp_prop_dict = dict()
# get the config for the global component
glob = props.get(x)
# for each property in the global component
for xy in glob["props"]:
# eval the polling command once and save the value to the dict
value = eval(glob["component_name"] + xy[1])
comp_prop_dict[xy[0]] = value
self.log.info([[
'g', (" - PropertyMonitorGlobal.monitor_component_status.%s.%s - ") %
(glob["component_name"], xy[0])
], ['p', value]])
# Create key for the component
rkey = COMPONENT_PREFIX_GLOBAL + ':%s' % x
# Save the dict into redis
self.r.set(rkey, json.dumps(comp_prop_dict))
self.r.set(COMPONENT_PREFIX_GLOBAL, dict_of_components)
class PropertyMonitorLocal:
def __init__(self, site_type):
self.log = HMILog(title=__name__)
self.log.info([['y', " - PropertyMonitor Local - "], ['g', site_type]])
self.site_type = site_type
self.tel_ids = tel_ids[site_type]
self.r = redis.StrictRedis(host='localhost', port=redis_port[site_type], db=0)
t = threading.Thread(target=self.tel_local_loop)
# set the thread to daemon for quicker termination
t.daemon = True
t.start()
return
def get_redis(self):
"""
:return: the instance of the redis client
"""
return self.r
def tel_local_loop(self):
"""
this method calls monitor_component_properties every second
"""
while True:
self.monitor_component_properties()
time.sleep(1)
# Polling generator
def sub_polling(self, component, params, key, command):
"""
this method polls the value of a specific prop and pushes changed values to redis while the prop still has subscribers
:param component: the component that has the prop
:param params: dict of additional parameters (polling interval etc.)
:param key: the key used to build the redis key
:param command: command used for polling
"""
# create the string containing the code
command_str = "%s%s" % (component, command)
print "started polling " + key + " with frequency:" + \
str(params["polling_interval"])
# save the return value
while local_queue.get("Polling:" + key) > 0:
# eval the string and save the value
value = eval(command_str)
print key + ": " + str(value)
# save the value into redis
# Build local component key
rkey_local = COMPONENT_PREFIX_LOCAL + ':Polling:%s' % key
set_name = DICT_SET_NAME + ':%s' % key
# check if the value in redis different
if self.r.sadd(set_name, value):
# recreate the set
self.r.delete(set_name)
self.r.sadd(set_name, value)
# Push to local component key; TTL 10sec
self.r.setex(rkey_local, 10, value)
else:
continue
# sleep for x seconds where x is specified in the config
time.sleep(int(params["polling_interval"] / 10000000))
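# command_str above is simply the component name concatenated with the polling
# command from the props config, so eval() ends up running something like
# (names hypothetical):
#   eval("TEST_JAVA_T1" + ".getSomeProperty()")
# on every polling interval while the property still has subscribers.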
# Monitor generator
def sub_monitoring(self, component, params, command):
"""
this method creates a string that contains monitor creation code
:param component: the name of the component we are monitoring on
:param params: dict of params (default monitoring rate etc.)
:param command: the command to create the monitor
:return: monitor creation string
"""
# creates monitor for the specified component and prop
mon_create = "mon=%s%s.create_monitor(client.activateOffShoot(cb), desc)" % (
component, command
)
# set the monitoring interval
mon_timer_trigger = "mon.set_timer_trigger(%d)" % int(
params["timer_trigger_interval"]
)
# create the final string that will be exec'ed
mon_setup = mon_create + "\n" + mon_timer_trigger + "\n" + \
"mon.set_value_trigger(%i, %s)" % (
params["value_trigger_delta"], params["is_monitor_value"])
return mon_setup
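# The string returned above and later exec'd by create_monitor() looks roughly
# like this (component, property and parameter values hypothetical):
#   mon=TEST_JAVA_T1._get_someProp().create_monitor(client.activateOffShoot(cb), desc)
#   mon.set_timer_trigger(10000000)
#   mon.set_value_trigger(1, True)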
def create_monitor(self, rkey_local, key):
"""
create a new BACI monitor for the given property and store its reference in redis
:param rkey_local: the key used for redis
:param key: component name to get the properties from the config
"""
cb = MonitorCB(rkey_local)
# creates the monitor from the generated string
exec(
self.sub_monitoring(
props[key]['component_name'],
props[key]["Monitor"]["additional_parameters"],
props[key]["Monitor"]["monitoring_command"]
)
)
# adds the reference to the newly created monitor to monitors dict
encoded_mon = jsonAcs.encode(mon)
# add the newly created monitor reference to the hset in redis
self.r.hset(MONITOR_DICT, key, encoded_mon)
def monitor_component_properties(self):
"""
This method monitors the local properties of a component.
Monitoring occurs only for properties that have subscribers listening
and can be done in three different ways
(BACI monitors, polling, or history queries from MongoDB).
"""
for key in local_queue.keys():
# parse to get the property name without the prefix
monitor_key = key.split(':', 1)[1]
if local_queue[key] == 0 and monitor_key in polling_threads.keys():
# get the thread of the property
t = polling_threads.pop(monitor_key, None)
# stop the thread
t.stop()
print key + " thread removed."
# check if the property has a monitor when the counter reaches 0
if local_queue[key] == 0 and self.r.hexists(MONITOR_DICT, monitor_key):
# get the monitor from redis hset
redis_monitor = self.r.hget(MONITOR_DICT, monitor_key)
m = jsonAcs.decode(redis_monitor)
# destroy the monitor
m.destroy()
print key + " monitor removed."
# remove the monitor key from the hset in redis
self.r.hdel(MONITOR_DICT, monitor_key)
if local_queue[key] > 0:
# split the key to check what kind of monitoring is needed
key_prefix = key.split(':', 1)[0]
key = key.split(':', 1)[1]
# dict used for saving data to redis
tel_prop_dict = dict()
# BACI monitoring was requested for this property; create the monitor if needed
if key_prefix == "Monitor":
tel_prop_dict[key] = ""
# Build local component key
rkey_local = COMPONENT_PREFIX_LOCAL + ':Monitor:%s' % key
set_name = DICT_SET_NAME + ':%s' % key
# check the redis hset if the monitor exists
if not self.r.hexists(MONITOR_DICT, monitor_key):
self.create_monitor(rkey_local, key)
print "Added monitor for property " + key + "."
# check if the value in redis different
if self.r.sadd(set_name, json.dumps(tel_prop_dict)):
# recreate the set
self.r.delete(set_name)
self.r.sadd(set_name, json.dumps(tel_prop_dict))
# Push to local component key; TTL 10sec
self.r.setex(rkey_local, 10, json.dumps(tel_prop_dict))
else:
continue
elif key_prefix == "Polling":
# if a thread for the current property doesn't exist, create it
if key not in polling_threads.keys():
# create a polling thread
t = PollingThread(
target=self.sub_polling,
args=(
props[key]["component_name"],
props[key]["Polling"]["additional_parameters"], key,
props[key]["Polling"]["polling_command"]
)
)
polling_threads[key] = t
t.start()
# todo: not implemented yet
elif key_prefix == "Mongo":
print "DB not supported yet"
else:
print "unsupported monitoring"
|
solver.py
|
#
# Copyright 2021, NTT Communications Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from collections import deque
from threading import Condition, Thread
from time import sleep
from typing import Callable, Dict, List, Optional
from urllib.request import Request, urlopen
from eth_typing import ChecksumAddress
from eth_utils.exceptions import ValidationError
from omegaconf.dictconfig import DictConfig
from requests.exceptions import HTTPError
from web3.datastructures import AttributeDict
from metemcyber.core.bc.account import Account
from metemcyber.core.bc.cti_operator import CTIOperator
from metemcyber.core.bc.eventlistener import BasicEventListener
from metemcyber.core.logger import get_logger
SIGNATURE_HEADER = 'Metemcyber-Signature'
QUEUE_DELAY_SEC = 2
LOGGER = get_logger(name='solver', file_prefix='core.bc')
class QueuedExecutor:
queue: deque
def __init__(self):
self.queue = deque() # deque is thread-safe
self.cond = Condition()
self.thread = Thread(target=self.run, daemon=True)
self.thread.start()
def destroy(self):
self.queue = None
self.cond.acquire()
self.cond.notify_all()
self.cond.release()
def enqueue(self, callback, *args, **kwargs) -> None:
self.cond.acquire()
self.queue.append((callback, args, kwargs))
self.cond.notify()
self.cond.release()
def run(self):
LOGGER.info(f'starting {self.__class__.__name__}.')
while True:
try:
callback, args, kwargs = self.queue.popleft()
callback(*args, **kwargs)
sleep(QUEUE_DELAY_SEC)
continue
except IndexError:
pass
except Exception as err:
LOGGER.exception(err)
continue
self.cond.acquire()
self.cond.wait()
self.cond.release()
if self.queue is None:
break
LOGGER.info(f'destructing {self.__class__.__name__}.')
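# Minimal usage sketch (illustrative): callbacks are executed one at a time on
# the worker thread, QUEUE_DELAY_SEC apart.
#   executor = QueuedExecutor()
#   executor.enqueue(LOGGER.info, 'queued message')
#   ...
#   executor.destroy()   # drops the queue and wakes the thread so it can exit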
class ChallengeListener(BasicEventListener):
def __init__(self, account: Account, operator: ChecksumAddress, event_name: str) -> None:
super().__init__(str(self))
# token_address: callback(token_address, event)
self.executor = QueuedExecutor()
self.accepting: Dict[ChecksumAddress, Callable[[ChecksumAddress, AttributeDict],
None]] = {}
event_filter = CTIOperator(account).get(
operator).event_filter(event_name, fromBlock='latest')
self.add_event_filter(f'{event_name}:{operator}', event_filter, self.dispatch_callback)
def destroy(self):
super().destroy()
self.executor.destroy()
def dispatch_callback(self, event: AttributeDict) -> None:
token_address = event['args']['token']
if token_address in self.accepting:
callback = self.accepting[token_address]
self.executor.enqueue(callback, token_address, event)
def accept_tokens(self, token_addresses: List[ChecksumAddress],
callback: Callable[[ChecksumAddress, AttributeDict], None]) -> None:
for address in token_addresses:
self.accepting[address] = callback
def refuse_tokens(self, token_addresses: List[ChecksumAddress]) -> None:
for address in token_addresses:
self.accepting.pop(address, None)
def list_accepting(self) -> List[ChecksumAddress]:
return list(self.accepting.keys())
class BaseSolver:
account: Account
listener: Optional[ChallengeListener]
config: DictConfig
def __init__(self, account: Account, config: DictConfig):
LOGGER.info('initializing solver %s for EOA %s', self, account.eoa)
self.account = account
self.listener = None
self.config = config
def destroy(self):
LOGGER.info('destructing solver %s for EOA %s', self, self.account.eoa)
if self.listener:
self.listener.destroy()
self.listener = None
@staticmethod  # should be overridden by subclass
def notify_first_accept():
return None
def accepting_tokens(self):
return self.listener.list_accepting() if self.listener else []
def accept_registered(self, tokens: Optional[List[ChecksumAddress]]):
LOGGER.info('accept_registered candidates: %s', tokens)
accepting = self.accepting_tokens()
cti_operator = CTIOperator(self.account).get(self.config.blockchain.operator.address)
if tokens is None: # auto detect mode
targets = cti_operator.list_registered(self.account.eoa)
else:
registered = cti_operator.check_registered(tokens)
targets = [
token for i, token in enumerate(tokens)
if registered[i] and token not in accepting]
if targets:
LOGGER.info('newly accepted: %s', targets)
msg = self._accept(targets, force_register=False)
self.reemit_pending_tasks(targets)
return msg
return None
def accept_challenges(self, tokens):
LOGGER.info('BaseSolver: accept tokens: %s', tokens)
accepting = self.accepting_tokens()
targets = [token for token in tokens if token not in accepting]
if targets:
msg = self._accept(targets, force_register=True)
self.reemit_pending_tasks(targets)
return msg
return None
def _accept(self, token_addresses, force_register=False):
if len(token_addresses) == 0:
return None
need_notify = \
self.listener is None or len(self.listener.accepting) == 0
if not self.listener:
self.listener = ChallengeListener(
self.account, self.config.blockchain.operator.address, 'TokensReceivedCalled')
self.listener.start()
self.listener.accept_tokens(token_addresses, self.process_challenge)
if force_register:
cti_operator = CTIOperator(self.account).get(self.config.blockchain.operator.address)
cti_operator.register_tokens(token_addresses)
return self.notify_first_accept() if need_notify else None
def refuse_challenges(self, tokens: List[ChecksumAddress]):
LOGGER.info('BaseSolver: refuse: %s', tokens)
targets = [t for t in tokens if t in self.accepting_tokens()]
if targets:
assert self.listener
self.listener.refuse_tokens(targets)
cti_operator = CTIOperator(self.account).get(self.config.blockchain.operator.address)
cti_operator.unregister_tokens(targets)
def accept_task(self, task_id):
try:
cti_operator = CTIOperator(self.account).get(self.config.blockchain.operator.address)
cti_operator.accept_task(task_id)
return True
except (HTTPError, ValueError, ValidationError) as err:
# another solver may accept faster than me.
LOGGER.error(err)
return False
def finish_task(self, task_id, data=''):
cti_operator = CTIOperator(self.account).get(self.config.blockchain.operator.address)
cti_operator.finish_task(task_id, data)
def reemit_pending_tasks(self, tokens):
cti_operator = CTIOperator(self.account).get(self.config.blockchain.operator.address)
cti_operator.reemit_pending_tasks(tokens)
@staticmethod
def process_challenge(_token_address, _event):
print('No challenge implementation or configuration is available')
# need your code as a plug-in. see plugins/*solver.py as examples.
# 1. preparation if needed.
# 2. accept_task. task_id is given in event['args']['taskId'].
# 3. your own process to solve request.
# 4. return result via webhook.
# the url can be obtained via Web3.toText(event['args']['data']).
# 5. finish_task.
def webhook(self, webhook_url: str, download_url: str,
seeker: ChecksumAddress, task_id: int, token_address: ChecksumAddress
) -> None:
data_obj = {
"solver": self.account.eoa,
"seeker": seeker,
"task_id": task_id,
"token_address": token_address,
"download_url": download_url,
}
data = json.dumps(data_obj, sort_keys=True)
sign = self.account.sign_message(str(data))
headers = {"Content-Type": "application/json",
SIGNATURE_HEADER: sign}
# prepare the HTTP request and POST it
request = Request(webhook_url, data=data.encode('utf-8'), method="POST", headers=headers)
with urlopen(request) as response:
LOGGER.info(response.getcode())
LOGGER.debug(response.info())
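# The POSTed body is canonical JSON signed with the solver's account, e.g.
# (all field values hypothetical):
#   {"download_url": "https://...", "seeker": "0x...", "solver": "0x...",
#    "task_id": 1, "token_address": "0x..."}
# with the signature carried in the Metemcyber-Signature header.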
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto . Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy . distance import vincenty
import curve25519
use_chacha = ( os . getenv ( "LISP_USE_CHACHA" ) != None )
use_poly = ( os . getenv ( "LISP_USE_POLY" ) != None )
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
lisp_print_rloc_probe_list = False
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
if 68 - 68: Ii1I / O0
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
if 46 - 46: O0 * II111iiii / IiII * Oo0Ooo * iII111i . I11i
lisp_map_notify_queue = { }
lisp_map_servers_list = { }
lisp_ddt_map_requestQ = { }
lisp_db_list = [ ]
lisp_group_mapping_list = { }
lisp_map_resolvers_list = { }
lisp_rtr_list = { }
lisp_elp_list = { }
lisp_rle_list = { }
lisp_geo_list = { }
lisp_json_list = { }
lisp_myrlocs = [ None , None , None ]
lisp_mymacs = { }
if 62 - 62: i11iIiiIii - II111iiii % I1Ii111 - iIii1I11I1II1 . I1ii11iIi11i . II111iiii
if 61 - 61: oO0o / OoOoOO00 / iII111i * OoO0O00 . II111iiii
if 1 - 1: II111iiii - I1ii11iIi11i % i11iIiiIii + IiII . I1Ii111
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
if 79 - 79: oO0o + I1Ii111 . ooOoO0o * IiII % I11i . I1IiiI
lisp_myinterfaces = { }
lisp_iid_to_interface = { }
lisp_multi_tenant_interfaces = [ ]
if 94 - 94: iII111i * Ii1I / IiII . i1IIi * iII111i
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
if 47 - 47: i1IIi % i11iIiiIii
if 20 - 20: ooOoO0o * II111iiii
if 65 - 65: o0oOOo0O0Ooo * iIii1I11I1II1 * ooOoO0o
if 18 - 18: iIii1I11I1II1 / I11i + oO0o / Oo0Ooo - II111iiii - I11i
lisp_registered_count = 0
if 1 - 1: I11i - OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
lisp_info_sources_by_address = { }
lisp_info_sources_by_nonce = { }
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
lisp_crypto_keys_by_nonce = { }
lisp_crypto_keys_by_rloc_encap = { }
lisp_crypto_keys_by_rloc_decap = { }
lisp_data_plane_security = False
lisp_search_decap_keys = True
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
if 42 - 42: oO0o - i1IIi / i11iIiiIii + OOooOOo + OoO0O00
if 17 - 17: oO0o . Oo0Ooo . I1ii11iIi11i
lisp_crypto_ephem_port = None
if 3 - 3: OoOoOO00 . Oo0Ooo . I1IiiI / Ii1I
if 38 - 38: II111iiii % i11iIiiIii . ooOoO0o - OOooOOo + Ii1I
if 66 - 66: OoooooooOO * OoooooooOO . OOooOOo . i1IIi - OOooOOo
if 77 - 77: I11i - iIii1I11I1II1
lisp_pitr = False
if 82 - 82: i11iIiiIii . OOooOOo / Oo0Ooo * O0 % oO0o % iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
lisp_l2_overlay = False
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
lisp_rloc_probing = False
lisp_rloc_probe_list = { }
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
lisp_register_all_rtrs = True
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
lisp_nonce_echoing = False
lisp_nonce_echo_list = { }
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if 50 - 50: I1IiiI
lisp_nat_traversal = False
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
if 88 - 88: Ii1I / I1Ii111 + iII111i - II111iiii / ooOoO0o - OoOoOO00
if 15 - 15: I1ii11iIi11i + OoOoOO00 - OoooooooOO / OOooOOo
if 58 - 58: i11iIiiIii % I11i
lisp_program_hardware = False
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
if 38 - 38: o0oOOo0O0Ooo
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
if 26 - 26: iII111i
if 91 - 91: OoO0O00 . I1ii11iIi11i + OoO0O00 - iII111i / OoooooooOO
if 39 - 39: I1ii11iIi11i / ooOoO0o - II111iiii
if 98 - 98: I1ii11iIi11i / I11i % oO0o . OoOoOO00
if 91 - 91: oO0o % Oo0Ooo
lisp_ipc_lock = None
if 64 - 64: I11i % iII111i - I1Ii111 - oO0o
if 31 - 31: I11i - II111iiii . I11i
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
lisp_default_iid = 0
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
if 68 - 68: OoO0O00 * OoooooooOO % O0 + OoO0O00 + ooOoO0o
lisp_ms_rtr_list = [ ]
if 4 - 4: ooOoO0o + O0 * OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
if 25 - 25: I1ii11iIi11i
if 7 - 7: i1IIi / I1IiiI * I1Ii111 . IiII . iIii1I11I1II1
if 13 - 13: OOooOOo / i11iIiiIii
if 2 - 2: I1IiiI / O0 / o0oOOo0O0Ooo % OoOoOO00 % Ii1I
lisp_nat_state_info = { }
if 52 - 52: o0oOOo0O0Ooo
if 95 - 95: Ii1I
if 87 - 87: ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
lisp_last_map_request_sent = None
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = [ ]
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
lisp_policies = { }
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
lisp_load_split_pings = False
if 68 - 68: OoOoOO00 - OoO0O00
if 28 - 28: OoO0O00 . OOooOOo / OOooOOo + Oo0Ooo . I1ii11iIi11i
if 1 - 1: iIii1I11I1II1 / II111iiii
if 33 - 33: I11i
if 18 - 18: o0oOOo0O0Ooo % iII111i * O0
if 87 - 87: i11iIiiIii
lisp_eid_hashes = [ ]
if 93 - 93: I1ii11iIi11i - OoO0O00 % i11iIiiIii . iII111i / iII111i - I1Ii111
if 9 - 9: I1ii11iIi11i / Oo0Ooo - I1IiiI / OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
lisp_reassembly_queue = { }
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
lisp_pubsub_cache = { }
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
lisp_decent_push_configured = False
if 95 - 95: I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
if 78 - 78: i11iIiiIii / iII111i - Ii1I / OOooOOo + oO0o
lisp_ipc_socket = None
if 82 - 82: Ii1I
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
lisp_ms_encryption_keys = { }
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
if 100 - 100: I1Ii111 * O0
if 64 - 64: OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
if 7 - 7: IiII * I1Ii111 % Ii1I - o0oOOo0O0Ooo
if 13 - 13: Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
lisp_rtr_nat_trace_cache = { }
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
if 74 - 74: iIii1I11I1II1 * I1ii11iIi11i + OoOoOO00 / i1IIi / II111iiii . Oo0Ooo
if 62 - 62: OoooooooOO * I1IiiI
lisp_glean_mappings = [ ]
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
lisp_map_reply_action_string = [ "no-action" , "native-forward" ,
"send-map-request" , "drop-action" , "policy-denied" , "auth-failure" ]
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
if 54 - 54: iIii1I11I1II1 % I1ii11iIi11i - OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
LISP_MR_TTL = ( 24 * 60 )
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
if 52 - 52: OOooOOo
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = 5
if 19 - 19: I1IiiI
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15
if 25 - 25: Ii1I / ooOoO0o
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if 78 - 78: Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
if 48 - 48: O0
if 93 - 93: i11iIiiIii - I1IiiI * I1ii11iIi11i * I11i % O0 + OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
if 9 - 9: ooOoO0o * OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoO0O00
if 49 - 49: II111iiii
if 25 - 25: OoooooooOO - I1IiiI . I1IiiI * oO0o
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
if 95 - 95: ooOoO0o / ooOoO0o
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
if 41 - 41: i1IIi - I11i - Ii1I
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
if 42 - 42: I1Ii111 . I1IiiI . i1IIi + OoOoOO00 + OOooOOo + I1IiiI
if 31 - 31: iII111i . OOooOOo - ooOoO0o . OoooooooOO / OoooooooOO
if 56 - 56: OoO0O00 / oO0o / i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
fd.close()
return
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
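# Usage sketch: a lispers.net process typically calls lisp_set_exception() once
# at startup so any uncaught exception is appended to ./logs/lisp-traceback.log
# by lisp_record_traceback() instead of being lost.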
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
def lisp_is_raspbian ( ) :
if ( platform . dist ( ) [ 0 ] != "debian" ) : return ( False )
return ( platform . machine ( ) in [ "armv6l" , "armv7l" ] )
if 5 - 5: Ii1I
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
if 1 - 1: i1IIi . i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
def lisp_is_ubuntu ( ) :
return ( platform . dist ( ) [ 0 ] == "Ubuntu" )
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
if 27 - 27: O0
def lisp_is_fedora ( ) :
return ( platform . dist ( ) [ 0 ] == "fedora" )
if 79 - 79: o0oOOo0O0Ooo - I11i + o0oOOo0O0Ooo . oO0o
if 28 - 28: i1IIi - iII111i
if 54 - 54: iII111i - O0 % OOooOOo
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
if 28 - 28: I11i
if 58 - 58: OoOoOO00
def lisp_is_centos ( ) :
return ( platform . dist ( ) [ 0 ] == "centos" )
if 37 - 37: Oo0Ooo - iIii1I11I1II1 / I1ii11iIi11i
if 73 - 73: i11iIiiIii - IiII
if 25 - 25: OoooooooOO + IiII * I1ii11iIi11i
if 92 - 92: I1IiiI + I11i + O0 / o0oOOo0O0Ooo + I1Ii111
if 18 - 18: ooOoO0o * OoOoOO00 . iII111i / I1ii11iIi11i / i11iIiiIii
if 21 - 21: oO0o / I1ii11iIi11i + Ii1I + OoooooooOO
if 91 - 91: i11iIiiIii / i1IIi + iII111i + ooOoO0o * i11iIiiIii
def lisp_is_debian ( ) :
return ( platform . dist ( ) [ 0 ] == "debian" )
if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + I11i * I1Ii111 . IiII
if 52 - 52: ooOoO0o + O0 . iII111i . I1ii11iIi11i . OoO0O00
if 97 - 97: I1IiiI / iII111i
if 71 - 71: II111iiii / i1IIi . I1ii11iIi11i % OoooooooOO . OoOoOO00
if 41 - 41: i1IIi * II111iiii / OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
def lisp_is_debian_kali ( ) :
return ( platform . dist ( ) [ 0 ] == "Kali" )
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
def lisp_is_macos ( ) :
return ( platform . uname ( ) [ 0 ] == "Darwin" )
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
def lisp_is_alpine ( ) :
return ( os . path . exists ( "/etc/alpine-release" ) )
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
def lisp_is_x86 ( ) :
I1Iiiiiii = platform . machine ( )
return ( I1Iiiiiii in ( "x86" , "i686" , "x86_64" ) )
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
def lisp_is_linux ( ) :
return ( platform . uname ( ) [ 0 ] == "Linux" )
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
if 27 - 27: Ii1I
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
if 91 - 91: I1Ii111 - OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o
def lisp_on_aws ( ) :
OO0 = commands . getoutput ( "sudo dmidecode -s bios-version" )
return ( OO0 . lower ( ) . find ( "amazon" ) != - 1 )
if 44 - 44: iII111i - I1Ii111 / O0 * Oo0Ooo + II111iiii / OoOoOO00
if 88 - 88: o0oOOo0O0Ooo - OoO0O00 + I1ii11iIi11i . I1Ii111 % I1Ii111
if 57 - 57: II111iiii
if 54 - 54: Oo0Ooo + oO0o + i11iIiiIii
if 28 - 28: oO0o
if 70 - 70: IiII
if 34 - 34: I1Ii111 % IiII
def lisp_on_gcp ( ) :
OO0 = commands . getoutput ( "sudo dmidecode -s bios-version" )
return ( OO0 . lower ( ) . find ( "google" ) != - 1 )
if 3 - 3: II111iiii / OOooOOo + IiII . ooOoO0o . OoO0O00
if 83 - 83: oO0o + OoooooooOO
if 22 - 22: Ii1I % iII111i * OoooooooOO - o0oOOo0O0Ooo / iIii1I11I1II1
if 86 - 86: OoooooooOO . iII111i % OoOoOO00 / I11i * iII111i / o0oOOo0O0Ooo
if 64 - 64: i11iIiiIii
if 38 - 38: IiII / I1IiiI - IiII . I11i
if 69 - 69: OoooooooOO + I1ii11iIi11i
if 97 - 97: OOooOOo - OoO0O00 / Ii1I . i11iIiiIii % oO0o * oO0o
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
if 90 - 90: Ii1I . IiII
if 81 - 81: OOooOOo - I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
if 39 - 39: OoooooooOO + oO0o % OOooOOo / OOooOOo
if 27 - 27: iII111i . I11i . iIii1I11I1II1 . iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
if 47 - 47: OoooooooOO
def lprint(*args):
if (lisp_debug_logging == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args: print arg,
print ""
try: sys.stdout.flush()
except: pass
return
if 67 - 67: OoO0O00 + oO0o
if 88 - 88: iII111i
if 19 - 19: II111iiii * IiII + Ii1I
if 65 - 65: OOooOOo . I1Ii111 . OoO0O00 . iII111i - OOooOOo
if 19 - 19: i11iIiiIii + iII111i % ooOoO0o
if 14 - 14: OoO0O00 . II111iiii . I11i / Ii1I % I1ii11iIi11i - ooOoO0o
if 67 - 67: I11i - OOooOOo . i1IIi
if 35 - 35: iII111i + ooOoO0o - oO0o . iII111i . IiII
def dprint ( * args ) :
if ( lisp_data_plane_logging ) : lprint ( * args )
return
if 87 - 87: OoOoOO00
if 25 - 25: i1IIi . OoO0O00 - OoOoOO00 / OoO0O00 % OoO0O00 * iIii1I11I1II1
if 50 - 50: OoO0O00 . i11iIiiIii - oO0o . oO0o
if 31 - 31: OOooOOo / Oo0Ooo * i1IIi . OoOoOO00
if 57 - 57: OOooOOo + iIii1I11I1II1 % i1IIi % I1IiiI
if 83 - 83: o0oOOo0O0Ooo / i11iIiiIii % iIii1I11I1II1 . I11i % oO0o . OoooooooOO
if 94 - 94: Ii1I + iIii1I11I1II1 % OoO0O00
if 93 - 93: Ii1I - OOooOOo + iIii1I11I1II1 * o0oOOo0O0Ooo + I1Ii111 . iII111i
def debug(*args):
    lisp_process_logfile()

    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]

    print red(">>>", False),
    print "{}:".format(ts),
    for arg in args: print arg,
    print red("<<<\n", False)
    try: sys.stdout.flush()
    except: pass
    return

def lisp_print_banner(string):
    global lisp_version, lisp_hostname

    if (lisp_version == ""):
        lisp_version = commands.getoutput("cat lisp-version.txt")

    hostname = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
        datetime.datetime.now(), lisp_version, hostname))
    return

def green(string, html):
    if (html): return('<font color="green"><b>{}</b></font>'.format(string))
    return(bold("\033[92m" + string + "\033[0m", html))

def green_last_sec(string):
    return(green(string, True))

def green_last_min(string):
    return('<font color="#58D68D"><b>{}</b></font>'.format(string))

def red(string, html):
    if (html): return('<font color="red"><b>{}</b></font>'.format(string))
    return(bold("\033[91m" + string + "\033[0m", html))

def blue(string, html):
    if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
    return(bold("\033[94m" + string + "\033[0m", html))

def bold(string, html):
    if (html): return("<b>{}</b>".format(string))
    return("\033[1m" + string + "\033[0m")

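#
# Illustrative sketch (not from the original source): the color helpers
# return HTML markup when html is True and ANSI escape sequences otherwise:
#
#   bold("up", True)    -> "<b>up</b>"
#   red("down", True)   -> '<font color="red"><b>down</b></font>'
#   red("down", False)  -> "\033[1m\033[91mdown\033[0m\033[0m"  (bold+red ANSI)
#
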
def convert_font(string):
    ansi_to_html = [["[91m", red], ["[92m", green], ["[94m", blue],
        ["[1m", bold]]
    end_sequence = "[0m"

    #
    # Find the first ANSI start sequence present in the string and remember
    # which HTML function it maps to.
    #
    for entry in ansi_to_html:
        ansi = entry[0]
        html_func = entry[1]
        offset = len(ansi)
        index = string.find(ansi)
        if (index != -1): break

    #
    # Replace each "<ansi>text[0m" run with the HTML equivalent.
    #
    while (index != -1):
        end_index = string[index::].find(end_sequence)
        substring = string[index+offset:index+end_index]
        string = string[:index] + html_func(substring, True) + string[index+end_index+offset::]
        index = string.find(ansi)

    #
    # A bold sequence can be nested inside a color sequence, so take another
    # pass if "[1m" is still present.
    #
    if (string.find("[1m") != -1): string = convert_font(string)
    return(string)

def lisp_space(num):
    output = ""
    for i in range(num): output += " "
    return(output)

def lisp_button(string, url):
    button = '<button style="background-color:transparent;border-radius:10px; ' + 'type="button">'

    if (url == None):
        html = button + string + "</button>"
    else:
        anchor = '<a href="{}">'.format(url)
        spaces = lisp_space(2)
        html = spaces + anchor + button + string + "</button></a>" + spaces

    return(html)

def lisp_print_cour(string):
    output = '<font face="Courier New">{}</font>'.format(string)
    return(output)

def lisp_print_sans(string):
    output = '<font face="Sans-Serif">{}</font>'.format(string)
    return(output)

def lisp_span(string, hover_string):
    output = '<span title="{}">{}</span>'.format(hover_string, string)
    return(output)

def lisp_eid_help_hover(output):
    eid_help = '''Unicast EID format:
    For longest match lookups:
        <address> or [<iid>]<address>
    For exact match lookups:
        <prefix> or [<iid>]<prefix>
Multicast EID format:
    For longest match lookups:
        <address>-><group> or
        [<iid>]<address>->[<iid>]<group>'''

    hover = lisp_span(output, eid_help)
    return(hover)

def lisp_geo_help_hover(output):
    geo_help = '''EID format:
    <address> or [<iid>]<address>
    '<name>' or [<iid>]'<name>'
Geo-Point format:
    d-m-s-<N|S>-d-m-s-<W|E> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
    d-m-s-<N|S>-d-m-s-<W|E>/<km> or
    [<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''

    hover = lisp_span(output, geo_help)
    return(hover)

def space(num):
    output = ""
    for i in range(num): output += " "
    return(output)

def lisp_get_ephemeral_port():
    return(random.randrange(32768, 65535))

def lisp_get_data_nonce():
    return(random.randint(0, 0xffffff))

def lisp_get_control_nonce():
    return(random.randint(0, (2**64) - 1))

def lisp_hex_string(integer_value):
    #
    # hex() on a Python 2 long appends an "L" suffix; strip it so callers
    # always get plain hex digits.
    #
    hex_str = hex(integer_value)[2::]
    if (hex_str[-1] == "L"): hex_str = hex_str[0:-1]
    return(hex_str)

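#
# Illustrative sketch (not from the original source):
#
#   lisp_hex_string(255)    -> "ff"
#   lisp_hex_string(2**64)  -> "10000000000000000"  (no "0x", no "L" suffix)
#
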
def lisp_get_timestamp():
    return(time.time())

def lisp_set_timestamp(seconds):
    return(time.time() + seconds)

def lisp_print_elapsed(ts):
    if (ts == 0 or ts == None): return("never")
    elapsed = time.time() - ts
    elapsed = round(elapsed, 0)
    return(str(datetime.timedelta(seconds=elapsed)))

def lisp_print_future(ts):
    if (ts == 0): return("never")
    future = ts - time.time()
    if (future < 0): return("expired")
    future = round(future, 0)
    return(str(datetime.timedelta(seconds=future)))

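#
# Illustrative sketch (not from the original source): these helpers pair with
# lisp_set_timestamp()/lisp_get_timestamp() to print timers, e.g.:
#
#   expiry = lisp_set_timestamp(60)
#   lisp_print_future(expiry)                     -> "0:01:00"
#   lisp_print_elapsed(lisp_get_timestamp() - 5)  -> "0:00:05"
#
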
def lisp_print_eid_tuple(eid, group):
    eid_str = eid.print_prefix()
    if (group.is_null()): return(eid_str)

    group_str = group.print_prefix()
    iid = group.instance_id

    #
    # Print "(*, G)" when there is no source EID or the source equals the
    # group, otherwise print the full "(S, G)" tuple.
    #
    if (eid.is_null() or eid.is_exact_match(group)):
        index = group_str.find("]") + 1
        return("[{}](*, {})".format(iid, group_str[index::]))

    sg_str = eid.print_sg(group)
    return(sg_str)

def lisp_convert_6to4(addr_str):
    if (addr_str.find("::ffff:") == -1): return(addr_str)
    addr = addr_str.split(":")
    return(addr[-1])

def lisp_convert_4to6(addr_str):
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
    addr.store_address(addr_str)
    return(addr)

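#
# Illustrative sketch (not from the original source): lisp_convert_6to4()
# strips an IPv4-mapped IPv6 prefix and otherwise leaves the string alone:
#
#   lisp_convert_6to4("::ffff:10.0.0.1") -> "10.0.0.1"
#   lisp_convert_6to4("10.0.0.1")        -> "10.0.0.1"
#
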
def lisp_gethostbyname(string):
    ipv4 = string.split(".")
    ipv6 = string.split(":")
    mac = string.split("-")

    #
    # If the string is already an IPv4 or IPv6 address literal, return it
    # unchanged.
    #
    if (len(ipv4) > 1):
        if (ipv4[0].isdigit()): return(string)

    if (len(ipv6) > 1):
        try:
            int(ipv6[0], 16)
            return(string)
        except:
            pass

    #
    # Check for a MAC address in xxxx-xxxx-xxxx form (fields must be hex).
    #
    if (len(mac) == 3):
        for i in range(3):
            try: int(mac[i], 16)
            except: break

    #
    # Otherwise treat the string as a DNS name. If resolution fails and we
    # are not running on Alpine, give up; on Alpine fall back to
    # getaddrinfo().
    #
    try:
        addr = socket.gethostbyname(string)
        return(addr)
    except:
        if (lisp_is_alpine() == False): return("")

    try:
        addr = socket.getaddrinfo(string, 0)[0]
        if (addr[3] != string): return("")
        addr = addr[4][0]
    except:
        addr = ""

    return(addr)

def lisp_ip_checksum(data):
    if (len(data) < 20):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return(data)

    ip = binascii.hexlify(data)

    #
    # Sum the ten 16-bit words of the 20-byte IPv4 header.
    #
    checksum = 0
    for i in range(0, 40, 4):
        checksum += int(ip[i:i+4], 16)

    #
    # Fold the carry bits and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Pack the checksum and insert it into the header checksum field
    # (bytes 10 and 11).
    #
    checksum = struct.pack("H", checksum)
    ip = data[0:10] + checksum + data[12::]
    return(ip)

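#
# Illustrative sketch (not from the original source): build a 20-byte IPv4
# header with a zeroed checksum field and let lisp_ip_checksum() fill it in.
# A receiver that sums all header words of the result, including the
# checksum field, gets 0xffff.
#
#   hdr = struct.pack("!BBHHHBBH4s4s", 0x45, 0, 20, 0, 0, 64, 17, 0,
#                     socket.inet_aton("10.0.0.1"), socket.inet_aton("10.0.0.2"))
#   hdr = lisp_ip_checksum(hdr)
#
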
def lisp_udp_checksum(source, dest, data):
    #
    # Build the IPv6 pseudo-header: source address, destination address,
    # upper-layer packet length, and next-header value (UDP).
    #
    src = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    dst = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udp_length = socket.htonl(len(data))
    next_header = socket.htonl(LISP_UDP_PROTOCOL)
    pseudo_header = src.pack_address()
    pseudo_header += dst.pack_address()
    pseudo_header += struct.pack("II", udp_length, next_header)

    #
    # Hexlify the pseudo-header plus UDP message and pad to a 16-bit word
    # boundary.
    #
    udp = binascii.hexlify(pseudo_header + data)
    padding = len(udp) % 4
    for i in range(0, padding): udp += "0"

    #
    # Sum the 16-bit words.
    #
    checksum = 0
    for i in range(0, len(udp), 4):
        checksum += int(udp[i:i+4], 16)

    #
    # Fold the carry bits and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Pack the checksum and insert it into the UDP checksum field (bytes 6
    # and 7 of the UDP header).
    #
    checksum = struct.pack("H", checksum)
    udp = data[0:6] + checksum + data[8::]
    return(udp)

def lisp_get_interface_address(device):
    #
    # Return None if the interface does not exist or has no IPv4 address.
    #
    if (device not in netifaces.interfaces()): return(None)

    addresses = netifaces.ifaddresses(device)
    if (addresses.has_key(netifaces.AF_INET) == False): return(None)

    #
    # Return the first IPv4 address found on the interface.
    #
    return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    for addr in addresses[netifaces.AF_INET]:
        addr_str = addr["addr"]
        return_address.store_address(addr_str)
        return(return_address)

    return(None)

def lisp_get_input_interface(packet):
    #
    # Pull the destination and source MAC addresses out of the Ethernet
    # header and map them to local interface names via lisp_mymacs.
    #
    macs = lisp_format_packet(packet[0:12]).replace(" ", "")
    dst_mac = macs[0:12]
    src_mac = macs[12::]

    try: my_sa = lisp_mymacs.has_key(src_mac)
    except: my_sa = False

    if (lisp_mymacs.has_key(dst_mac)): return(lisp_mymacs[dst_mac], src_mac, dst_mac, my_sa)
    if (my_sa): return(lisp_mymacs[src_mac], src_mac, dst_mac, my_sa)
    return(["?"], src_mac, dst_mac, my_sa)

def lisp_get_local_interfaces():
    for device in netifaces.interfaces():
        interface = lisp_interface(device)
        interface.add_interface()

    return

def lisp_get_loopback_address():
    for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        if (addr["peer"] == "127.0.0.1"): continue
        return(addr["peer"])

    return(None)

def lisp_is_mac_string(mac_str):
    #
    # A MAC EID is written as "xxxx-xxxx-xxxx", optionally followed by a
    # "/<mask-length>" suffix.
    #
    mac = mac_str.split("/")
    if (len(mac) == 2): mac_str = mac[0]
    return(len(mac_str) == 14 and mac_str.count("-") == 2)

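#
# Illustrative sketch (not from the original source):
#
#   lisp_is_mac_string("0000-1111-2222")    -> True
#   lisp_is_mac_string("0000-1111-2222/48") -> True
#   lisp_is_mac_string("00:11:22:33:44:55") -> False
#
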
def lisp_get_local_macs():
    for device in netifaces.interfaces():
        #
        # Ignore interfaces with odd characters in their names; ":" and "-"
        # are stripped before the alphanumeric check.
        #
        name = device.replace(":", "")
        name = name.replace("-", "")
        if (name.isalnum() == False): continue

        #
        # Get the MAC address for the interface, if it has one.
        #
        try:
            parms = netifaces.ifaddresses(device)
        except:
            continue

        if (parms.has_key(netifaces.AF_LINK) == False): continue
        mac = parms[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")

        #
        # Ignore pseudo interfaces that report a short MAC address.
        #
        if (len(mac) < 12): continue

        if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
    return

def lisp_get_local_rloc():
    out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
    if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))

    #
    # Get the device the default route points to, then ask that device for
    # its IPv4 address.
    #
    out = out.split("\n")[0]
    device = out.split()[-1]

    addr_str = ""
    macos = lisp_is_macos()
    if (macos):
        out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
    else:
        cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
        out = commands.getoutput(cmd)
        if (out == ""):
            cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
            out = commands.getoutput(cmd)

        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))

    #
    # Return the first address found in the command output.
    #
    addr_str = ""
    out = out.split("\n")
    for line in out:
        a = line.split()[1]
        if (macos == False): a = a.split("/")[0]
        address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
        return(address)

    return(lisp_address(LISP_AFI_IPV4, addr_str, 32, 0))

def lisp_get_local_addresses():
    global lisp_myrlocs

    #
    # Honor environment variable LISP_ADDR_SELECT, which can be set to
    # "<device>:<address-count>", "<device>", or "<address-count>" to pin
    # address selection to a device and/or to the Nth address on it.
    #
    device_select = None
    address_count = 1
    env = os.getenv("LISP_ADDR_SELECT")
    if (env != None and env != ""):
        env = env.split(":")
        if (len(env) == 2):
            device_select = env[0]
            address_count = env[1]
        else:
            if (env[0].isdigit()):
                address_count = env[0]
            else:
                device_select = env[0]

        address_count = 1 if (address_count == "") else int(address_count)

    rlocs = [None, None, None]
    rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    iid = None

    for device in netifaces.interfaces():
        if (device_select != None and device_select != device): continue
        addresses = netifaces.ifaddresses(device)
        if (addresses == {}): continue

        #
        # Get the instance-ID configured for this interface, if any.
        #
        iid = lisp_get_interface_instance_id(device, None)

        #
        # Pick an IPv4 address that is not loopback, link-local, or zero.
        # Unless the user pinned a device, skip addresses that match a
        # database-mapping entry (they are EIDs, not RLOCs).
        #
        if (addresses.has_key(netifaces.AF_INET)):
            ipv4 = addresses[netifaces.AF_INET]
            count = 0
            for addr in ipv4:
                rloc4.store_address(addr["addr"])
                if (rloc4.is_ipv4_loopback()): continue
                if (rloc4.is_ipv4_link_local()): continue
                if (rloc4.address == 0): continue
                count += 1
                rloc4.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
                rlocs[0] = rloc4
                if (count == address_count): break

        #
        # Same selection for IPv6, additionally skipping link-local strings.
        #
        if (addresses.has_key(netifaces.AF_INET6)):
            ipv6 = addresses[netifaces.AF_INET6]
            count = 0
            for addr in ipv6:
                addr_str = addr["addr"]
                rloc6.store_address(addr_str)
                if (rloc6.is_ipv6_string_link_local(addr_str)): continue
                if (rloc6.is_ipv6_loopback()): continue
                count += 1
                rloc6.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
                rlocs[1] = rloc6
                if (count == address_count): break

        #
        # Move on to the next interface if no usable IPv4 address was found.
        #
        if (rlocs[0] == None): continue

        rlocs[2] = device
        break

    addr4 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
    addr6 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
    device = rlocs[2] if rlocs[2] else "none"

    device_select = " (user selected)" if device_select != None else ""

    addr4 = red(addr4, False)
    addr6 = red(addr6, False)
    device = bold(device, False)
    lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}".format(addr4, addr6, device, device_select, iid))

    lisp_myrlocs = rlocs
    return((rlocs[0] != None))

def lisp_get_all_addresses():
    address_list = []
    for interface in netifaces.interfaces():
        try: parms = netifaces.ifaddresses(interface)
        except: continue

        if (parms.has_key(netifaces.AF_INET)):
            for addr in parms[netifaces.AF_INET]:
                a = addr["addr"]
                if (a.find("127.0.0.1") != -1): continue
                address_list.append(a)

        if (parms.has_key(netifaces.AF_INET6)):
            for addr in parms[netifaces.AF_INET6]:
                a = addr["addr"]
                if (a == "::1"): continue
                if (a[0:5] == "fe80:"): continue
                address_list.append(a)

    return(address_list)

def lisp_get_all_multicast_rles():
    #
    # Scan ./lisp.config for "rle-address = " lines and return the ones that
    # are IPv4 multicast (first byte 224 through 239).
    #
    rles = []
    out = commands.getoutput('egrep "rle-address =" ./lisp.config')
    if (out == ""): return(rles)

    lines = out.split("\n")
    for line in lines:
        if (line[0] == "#"): continue
        rle = line.split("rle-address = ")[1]
        first_byte = int(rle.split(".")[0])
        if (first_byte >= 224 and first_byte < 240): rles.append(rle)

    return(rles)

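#
# Illustrative sketch (not from the original source): a lisp.config entry
# such as "rle-address = 224.1.1.1" would be returned by
# lisp_get_all_multicast_rles() as ["224.1.1.1"], while unicast RLE
# addresses (first byte outside 224-239) are filtered out.
#
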
class lisp_packet():
    def __init__(self, packet):
        self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_tos = 0
        self.outer_ttl = 0
        self.udp_sport = 0
        self.udp_dport = 0
        self.udp_length = 0
        self.udp_checksum = 0
        self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_tos = 0
        self.inner_ttl = 0
        self.inner_protocol = 0
        self.inner_sport = 0
        self.inner_dport = 0
        self.lisp_header = lisp_data_header()
        self.packet = packet
        self.inner_version = 0
        self.outer_version = 0
        self.encap_port = LISP_DATA_PORT
        self.inner_is_fragment = False
        self.packet_error = ""
        self.gleaned_dest = False

def encode ( self , nonce ) :
if 8 - 8: I1Ii111 + OoO0O00
if 9 - 9: OOooOOo + o0oOOo0O0Ooo
if 8 - 8: OOooOOo * Oo0Ooo / iII111i - OoO0O00 - OoooooooOO
if 100 - 100: oO0o . iIii1I11I1II1 . iIii1I11I1II1
if 55 - 55: oO0o
if ( self . outer_source . is_null ( ) ) : return ( None )
if 37 - 37: IiII / i11iIiiIii / Oo0Ooo
if 97 - 97: I1Ii111 . I11i / I1IiiI
if 83 - 83: I11i - I1ii11iIi11i * oO0o
if 90 - 90: Oo0Ooo * I1IiiI
if 75 - 75: I1ii11iIi11i - OoOoOO00 * i11iIiiIii . OoooooooOO - Oo0Ooo . I11i
if 6 - 6: I11i * oO0o / OoooooooOO % Ii1I * o0oOOo0O0Ooo
if ( nonce == None ) :
self . lisp_header . nonce ( lisp_get_data_nonce ( ) )
elif ( self . lisp_header . is_request_nonce ( nonce ) ) :
self . lisp_header . request_nonce ( nonce )
else :
self . lisp_header . nonce ( nonce )
if 28 - 28: IiII * I1IiiI % IiII
self . lisp_header . instance_id ( self . inner_dest . instance_id )
if 95 - 95: O0 / I11i . I1Ii111
if 17 - 17: I11i
if 56 - 56: ooOoO0o * o0oOOo0O0Ooo + I11i
if 48 - 48: IiII * OoO0O00 % I1Ii111 - I11i
if 72 - 72: i1IIi % ooOoO0o % IiII % oO0o - oO0o
if 97 - 97: o0oOOo0O0Ooo * O0 / o0oOOo0O0Ooo * OoO0O00 * Oo0Ooo
self . lisp_header . key_id ( 0 )
iiI1iiii1Iii = ( self . lisp_header . get_instance_id ( ) == 0xffffff )
if ( lisp_data_plane_security and iiI1iiii1Iii == False ) :
ooOOo0o = self . outer_dest . print_address_no_iid ( ) + ":" + str ( self . encap_port )
if 94 - 94: i11iIiiIii % oO0o + Oo0Ooo + oO0o
if ( lisp_crypto_keys_by_rloc_encap . has_key ( ooOOo0o ) ) :
i1iIi = lisp_crypto_keys_by_rloc_encap [ ooOOo0o ]
if ( i1iIi [ 1 ] ) :
i1iIi [ 1 ] . use_count += 1
oOo , ooOo0o = self . encrypt ( i1iIi [ 1 ] , ooOOo0o )
if ( ooOo0o ) : self . packet = oOo
if 44 - 44: Oo0Ooo . Oo0Ooo + OoooooooOO * i11iIiiIii / I11i + I1Ii111
if 17 - 17: OOooOOo + II111iiii
if 43 - 43: I11i % Ii1I / o0oOOo0O0Ooo * I1Ii111
if 85 - 85: iIii1I11I1II1 . OoooooooOO . o0oOOo0O0Ooo
if 77 - 77: I1IiiI % ooOoO0o
if 74 - 74: OoOoOO00 / i1IIi % OoooooooOO
if 52 - 52: IiII % ooOoO0o
if 25 - 25: I11i / I11i % OoooooooOO - I1ii11iIi11i * oO0o
self . udp_checksum = 0
if ( self . encap_port == LISP_DATA_PORT ) :
if ( lisp_crypto_ephem_port == None ) :
if ( self . gleaned_dest ) :
self . udp_sport = LISP_DATA_PORT
else :
self . hash_packet ( )
if 23 - 23: i11iIiiIii
else :
self . udp_sport = lisp_crypto_ephem_port
if 100 - 100: oO0o + O0 . I1IiiI + i1IIi - OoOoOO00 + o0oOOo0O0Ooo
else :
self . udp_sport = LISP_DATA_PORT
if 65 - 65: II111iiii / Oo0Ooo
self . udp_dport = self . encap_port
self . udp_length = len ( self . packet ) + 16
if 42 - 42: i11iIiiIii . O0
if 75 - 75: I1Ii111 + iIii1I11I1II1
if 19 - 19: I1IiiI + i11iIiiIii . IiII - I11i / Ii1I + o0oOOo0O0Ooo
if 38 - 38: Oo0Ooo / iIii1I11I1II1 * iIii1I11I1II1 % I1ii11iIi11i
if ( self . outer_version == 4 ) :
O00o = socket . htons ( self . udp_sport )
o0o0ooOo00 = socket . htons ( self . udp_dport )
else :
O00o = self . udp_sport
o0o0ooOo00 = self . udp_dport
if 91 - 91: OoO0O00 * I1Ii111 % OoO0O00 . o0oOOo0O0Ooo * I1ii11iIi11i . OOooOOo
if 13 - 13: I1ii11iIi11i
o0o0ooOo00 = socket . htons ( self . udp_dport ) if self . outer_version == 4 else self . udp_dport
if 80 - 80: Oo0Ooo % IiII % OoooooooOO * Oo0Ooo % Ii1I
if 41 - 41: OoooooooOO / i1IIi
OOOOo00oo00O = struct . pack ( "HHHH" , O00o , o0o0ooOo00 , socket . htons ( self . udp_length ) ,
self . udp_checksum )
if 70 - 70: OoOoOO00 % o0oOOo0O0Ooo % i1IIi / I1ii11iIi11i % i11iIiiIii / i1IIi
if 4 - 4: IiII
if 93 - 93: oO0o % i1IIi
if 83 - 83: I1IiiI . Oo0Ooo - I11i . o0oOOo0O0Ooo
ooo00o0o0 = self . lisp_header . encode ( )
if 54 - 54: Ii1I % I11i . OOooOOo + oO0o * iII111i - i1IIi
if 27 - 27: Ii1I % i1IIi . Oo0Ooo % I1Ii111
if 10 - 10: IiII / OoooooooOO
if 50 - 50: i11iIiiIii - OoooooooOO . oO0o + O0 . i1IIi
if 91 - 91: o0oOOo0O0Ooo . iII111i % Oo0Ooo - iII111i . oO0o % i11iIiiIii
if ( self . outer_version == 4 ) :
iIi = socket . htons ( self . udp_length + 20 )
O0O = socket . htons ( 0x4000 )
oOOoooo = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , iIi , 0xdfdf ,
O0O , self . outer_ttl , 17 , 0 )
oOOoooo += self . outer_source . pack_address ( )
oOOoooo += self . outer_dest . pack_address ( )
oOOoooo = lisp_ip_checksum ( oOOoooo )
elif ( self . outer_version == 6 ) :
oOOoooo = ""
if 70 - 70: iII111i . II111iiii . iII111i - iIii1I11I1II1
if 92 - 92: OoO0O00
if 15 - 15: IiII / IiII + iIii1I11I1II1 % OoooooooOO
if 12 - 12: ooOoO0o
if 36 - 36: I1Ii111 . IiII * OoooooooOO - o0oOOo0O0Ooo
if 60 - 60: OOooOOo . iII111i / iIii1I11I1II1 + OOooOOo * I1Ii111
if 82 - 82: i11iIiiIii . iIii1I11I1II1 * I1IiiI - I11i + Ii1I
else :
return ( None )
if 48 - 48: I1ii11iIi11i
if 96 - 96: ooOoO0o . OoooooooOO
self . packet = oOOoooo + OOOOo00oo00O + ooo00o0o0 + self . packet
return ( self )
if 39 - 39: OOooOOo + OoO0O00
if 80 - 80: OOooOOo % OoO0O00 / OoOoOO00
def cipher_pad ( self , packet ) :
OOOOO000oo0 = len ( packet )
if ( ( OOOOO000oo0 % 16 ) != 0 ) :
I1iI111ii111i = ( ( OOOOO000oo0 / 16 ) + 1 ) * 16
packet = packet . ljust ( I1iI111ii111i )
if 83 - 83: iIii1I11I1II1
return ( packet )
if 97 - 97: i11iIiiIii + Oo0Ooo * OOooOOo % iII111i . IiII
if 4 - 4: O0 . iII111i - iIii1I11I1II1
def encrypt ( self , key , addr_str ) :
if ( key == None or key . shared_key == None ) :
return ( [ self . packet , False ] )
if 19 - 19: OOooOOo % OoO0O00 / Ii1I + II111iiii % OoooooooOO
if 89 - 89: Ii1I
if 51 - 51: iII111i
if 68 - 68: iII111i - o0oOOo0O0Ooo * OoO0O00 % ooOoO0o . ooOoO0o - iIii1I11I1II1
if 22 - 22: OoooooooOO / I1ii11iIi11i % iII111i * OoOoOO00
oOo = self . cipher_pad ( self . packet )
Ii1IiiiI1ii = key . get_iv ( )
if 55 - 55: I1ii11iIi11i
OOOO0O00o = lisp_get_timestamp ( )
oOoo0OO0 = None
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
iiIiIi1111iI1 = chacha . ChaCha ( key . encrypt_key , Ii1IiiiI1ii ) . encrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
III = binascii . unhexlify ( key . encrypt_key )
try :
OoO0o = AES . new ( III , AES . MODE_GCM , Ii1IiiiI1ii )
iiIiIi1111iI1 = OoO0o . encrypt
oOoo0OO0 = OoO0o . digest
except :
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ self . packet , False ] )
if 72 - 72: OOooOOo % OoooooooOO % o0oOOo0O0Ooo * OOooOOo % I1IiiI * Ii1I
else :
III = binascii . unhexlify ( key . encrypt_key )
iiIiIi1111iI1 = AES . new ( III , AES . MODE_CBC , Ii1IiiiI1ii ) . encrypt
if 34 - 34: OoO0O00 * Ii1I * Oo0Ooo
if 21 - 21: OoooooooOO . OoOoOO00 - iIii1I11I1II1 % IiII
Oooo0ooOoo0 = iiIiIi1111iI1 ( oOo )
if 26 - 26: IiII / iIii1I11I1II1 - iIii1I11I1II1
if ( Oooo0ooOoo0 == None ) : return ( [ self . packet , False ] )
OOOO0O00o = int ( str ( time . time ( ) - OOOO0O00o ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
if 57 - 57: IiII
if 41 - 41: iIii1I11I1II1 * iII111i + Oo0Ooo * o0oOOo0O0Ooo % IiII / OOooOOo
if 63 - 63: i1IIi % i11iIiiIii % II111iiii * OoooooooOO
if 40 - 40: Oo0Ooo
if 47 - 47: OoOoOO00
if 65 - 65: O0 + I1Ii111 % Ii1I * I1IiiI / ooOoO0o / OoOoOO00
if ( oOoo0OO0 != None ) : Oooo0ooOoo0 += oOoo0OO0 ( )
if 71 - 71: i11iIiiIii / OoOoOO00 . oO0o
if 33 - 33: oO0o
if 39 - 39: OoO0O00 + O0 + ooOoO0o * II111iiii % O0 - O0
if 41 - 41: IiII % o0oOOo0O0Ooo
if 67 - 67: O0 % I1Ii111
self . lisp_header . key_id ( key . key_id )
ooo00o0o0 = self . lisp_header . encode ( )
if 35 - 35: I1IiiI . OoOoOO00 + OoooooooOO % Oo0Ooo % OOooOOo
iIi1Ii1111i = key . do_icv ( ooo00o0o0 + Ii1IiiiI1ii + Oooo0ooOoo0 , Ii1IiiiI1ii )
if 16 - 16: IiII . ooOoO0o . OoO0O00
o0oO0oo = 4 if ( key . do_poly ) else 8
if 98 - 98: OoooooooOO - I1IiiI + ooOoO0o
O0I11IIIII = bold ( "Encrypt" , False )
OoO = bold ( key . cipher_suite_string , False )
addr_str = "RLOC: " + red ( addr_str , False )
II11IiI1 = "poly" if key . do_poly else "sha256"
II11IiI1 = bold ( II11IiI1 , False )
OoOOOO00oOO = "ICV({}): 0x{}...{}" . format ( II11IiI1 , iIi1Ii1111i [ 0 : o0oO0oo ] , iIi1Ii1111i [ - o0oO0oo : : ] )
dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( O0I11IIIII , key . key_id , addr_str , OoOOOO00oOO , OoO , OOOO0O00o ) )
if 4 - 4: i1IIi + OoOoOO00
if 39 - 39: iIii1I11I1II1 + ooOoO0o
iIi1Ii1111i = int ( iIi1Ii1111i , 16 )
if ( key . do_poly ) :
o00oOoo0o00 = byte_swap_64 ( ( iIi1Ii1111i >> 64 ) & LISP_8_64_MASK )
iIiiI11II11i = byte_swap_64 ( iIi1Ii1111i & LISP_8_64_MASK )
iIi1Ii1111i = struct . pack ( "QQ" , o00oOoo0o00 , iIiiI11II11i )
else :
o00oOoo0o00 = byte_swap_64 ( ( iIi1Ii1111i >> 96 ) & LISP_8_64_MASK )
iIiiI11II11i = byte_swap_64 ( ( iIi1Ii1111i >> 32 ) & LISP_8_64_MASK )
o00OoO0o0 = socket . htonl ( iIi1Ii1111i & 0xffffffff )
iIi1Ii1111i = struct . pack ( "QQI" , o00oOoo0o00 , iIiiI11II11i , o00OoO0o0 )
if 52 - 52: iII111i . oO0o - Ii1I
if 85 - 85: I1ii11iIi11i / i1IIi * OoO0O00 . oO0o
return ( [ Ii1IiiiI1ii + Oooo0ooOoo0 + iIi1Ii1111i , True ] )
if 60 - 60: I11i
if 93 - 93: Oo0Ooo
def decrypt ( self , packet , header_length , key , addr_str ) :
if 75 - 75: OoOoOO00
if 64 - 64: IiII / o0oOOo0O0Ooo / i1IIi
if 79 - 79: OOooOOo % I1Ii111 / oO0o - iIii1I11I1II1 - OoOoOO00
if 60 - 60: II111iiii
if 90 - 90: OoOoOO00
if 37 - 37: OoOoOO00 + O0 . O0 * Oo0Ooo % I1Ii111 / iII111i
if ( key . do_poly ) :
o00oOoo0o00 , iIiiI11II11i = struct . unpack ( "QQ" , packet [ - 16 : : ] )
iIIi = byte_swap_64 ( o00oOoo0o00 ) << 64
iIIi |= byte_swap_64 ( iIiiI11II11i )
iIIi = lisp_hex_string ( iIIi ) . zfill ( 32 )
packet = packet [ 0 : - 16 ]
o0oO0oo = 4
OOOo00o = bold ( "poly" , False )
else :
o00oOoo0o00 , iIiiI11II11i , o00OoO0o0 = struct . unpack ( "QQI" , packet [ - 20 : : ] )
iIIi = byte_swap_64 ( o00oOoo0o00 ) << 96
iIIi |= byte_swap_64 ( iIiiI11II11i ) << 32
iIIi |= socket . htonl ( o00OoO0o0 )
iIIi = lisp_hex_string ( iIIi ) . zfill ( 40 )
packet = packet [ 0 : - 20 ]
o0oO0oo = 8
OOOo00o = bold ( "sha" , False )
if 3 - 3: o0oOOo0O0Ooo
ooo00o0o0 = self . lisp_header . encode ( )
if 16 - 16: i1IIi . i1IIi / I1Ii111 % OoOoOO00 / I1IiiI * I1ii11iIi11i
if 30 - 30: o0oOOo0O0Ooo + OoooooooOO + OOooOOo / II111iiii * Oo0Ooo
if 59 - 59: Ii1I / OoOoOO00 * OoO0O00 * iII111i % oO0o
if 61 - 61: Oo0Ooo - O0 - OoooooooOO
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
Ii1I1Iiii = 8
OoO = bold ( "chacha" , False )
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
Ii1I1Iiii = 12
OoO = bold ( "aes-gcm" , False )
else :
Ii1I1Iiii = 16
OoO = bold ( "aes-cbc" , False )
if 80 - 80: OOooOOo . Ii1I + iIii1I11I1II1
Ii1IiiiI1ii = packet [ 0 : Ii1I1Iiii ]
if 32 - 32: I1IiiI
if 47 - 47: I1ii11iIi11i * oO0o + iIii1I11I1II1 - oO0o / IiII
if 86 - 86: IiII
if 43 - 43: I1IiiI / iII111i / ooOoO0o + iIii1I11I1II1 + OoooooooOO
iiI111i1 = key . do_icv ( ooo00o0o0 + packet , Ii1IiiiI1ii )
if 41 - 41: i11iIiiIii * O0 - iII111i . II111iiii % OoO0O00 % I1ii11iIi11i
I1I11i = "0x{}...{}" . format ( iIIi [ 0 : o0oO0oo ] , iIIi [ - o0oO0oo : : ] )
Iii1Iii = "0x{}...{}" . format ( iiI111i1 [ 0 : o0oO0oo ] , iiI111i1 [ - o0oO0oo : : ] )
if 91 - 91: ooOoO0o * IiII * II111iiii
if ( iiI111i1 != iIIi ) :
self . packet_error = "ICV-error"
oooO0oooOo000 = OoO + "/" + OOOo00o
ooOOO0o = bold ( "ICV failed ({})" . format ( oooO0oooOo000 ) , False )
OoOOOO00oOO = "packet-ICV {} != computed-ICV {}" . format ( I1I11i , Iii1Iii )
dprint ( ( "{} from RLOC {}, receive-port: {}, key-id: {}, " + "packet dropped, {}" ) . format ( ooOOO0o , red ( addr_str , False ) ,
self . udp_sport , key . key_id , OoOOOO00oOO ) )
dprint ( "{}" . format ( key . print_keys ( ) ) )
if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
if 32 - 32: o0oOOo0O0Ooo . i1IIi * Oo0Ooo
if 98 - 98: Ii1I - II111iiii / I1IiiI . oO0o * IiII . I11i
if 25 - 25: i11iIiiIii / OoOoOO00 - I1Ii111 / OoO0O00 . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 6 - 6: oO0o . I11i
if 43 - 43: I1ii11iIi11i + o0oOOo0O0Ooo
lisp_retry_decap_keys ( addr_str , ooo00o0o0 + packet , Ii1IiiiI1ii , iIIi )
return ( [ None , False ] )
if 50 - 50: oO0o % i1IIi * O0
if 4 - 4: iIii1I11I1II1 . i1IIi
if 63 - 63: iIii1I11I1II1 + IiII % i1IIi / I1IiiI % II111iiii
if 60 - 60: o0oOOo0O0Ooo . OoOoOO00 % I1Ii111 / I1IiiI / O0
if 19 - 19: i11iIiiIii . I1IiiI + II111iiii / OOooOOo . I1ii11iIi11i * ooOoO0o
packet = packet [ Ii1I1Iiii : : ]
if 59 - 59: iIii1I11I1II1 / I1ii11iIi11i % ooOoO0o
if 84 - 84: iIii1I11I1II1 / I1IiiI . OoOoOO00 % I11i
if 99 - 99: Oo0Ooo + i11iIiiIii
if 36 - 36: Ii1I * I1Ii111 * iIii1I11I1II1 - I11i % i11iIiiIii
OOOO0O00o = lisp_get_timestamp ( )
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
OoOo00O0o = chacha . ChaCha ( key . encrypt_key , Ii1IiiiI1ii ) . decrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
III = binascii . unhexlify ( key . encrypt_key )
try :
OoOo00O0o = AES . new ( III , AES . MODE_GCM , Ii1IiiiI1ii ) . decrypt
except :
self . packet_error = "no-decrypt-key"
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ None , False ] )
if 96 - 96: IiII * IiII % ooOoO0o + o0oOOo0O0Ooo
else :
if ( ( len ( packet ) % 16 ) != 0 ) :
dprint ( "Ciphertext not multiple of 16 bytes, packet dropped" )
return ( [ None , False ] )
if 27 - 27: Oo0Ooo * ooOoO0o + i11iIiiIii / I1IiiI - oO0o
III = binascii . unhexlify ( key . encrypt_key )
OoOo00O0o = AES . new ( III , AES . MODE_CBC , Ii1IiiiI1ii ) . decrypt
if 44 - 44: Ii1I * ooOoO0o / OoOoOO00
if 69 - 69: ooOoO0o . OOooOOo - I1IiiI
IiIi = OoOo00O0o ( packet )
OOOO0O00o = int ( str ( time . time ( ) - OOOO0O00o ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
if 44 - 44: II111iiii . II111iiii + OOooOOo * Ii1I
if 16 - 16: II111iiii
if 100 - 100: O0 - i1IIi
if 48 - 48: oO0o % ooOoO0o + O0
O0I11IIIII = bold ( "Decrypt" , False )
addr_str = "RLOC: " + red ( addr_str , False )
II11IiI1 = "poly" if key . do_poly else "sha256"
II11IiI1 = bold ( II11IiI1 , False )
OoOOOO00oOO = "ICV({}): {}" . format ( II11IiI1 , I1I11i )
dprint ( "{} for key-id: {}, {}, {} (good), {}-time: {} usec" . format ( O0I11IIIII , key . key_id , addr_str , OoOOOO00oOO , OoO , OOOO0O00o ) )
if 27 - 27: I1ii11iIi11i / OOooOOo
if 33 - 33: OoooooooOO % I1ii11iIi11i . O0 / I1ii11iIi11i
if 63 - 63: IiII + iIii1I11I1II1 + I1IiiI + I1Ii111
if 72 - 72: OoO0O00 + i11iIiiIii + I1ii11iIi11i
if 96 - 96: oO0o % i1IIi / o0oOOo0O0Ooo
if 13 - 13: II111iiii - Oo0Ooo % i11iIiiIii + iII111i
if 88 - 88: O0 . oO0o % I1IiiI
self . packet = self . packet [ 0 : header_length ]
return ( [ IiIi , True ] )
if 10 - 10: I1IiiI + O0
if 75 - 75: O0 % iIii1I11I1II1 / OoOoOO00 % OOooOOo / IiII
def fragment_outer ( self , outer_hdr , inner_packet ) :
iiI1iiIiiiI1I = 1000
if 6 - 6: OoO0O00
if 99 - 99: o0oOOo0O0Ooo * OOooOOo % oO0o * oO0o + OoooooooOO
if 82 - 82: I11i / OoOoOO00 - OOooOOo / ooOoO0o
if 50 - 50: OOooOOo + OoO0O00 . i11iIiiIii + I1ii11iIi11i + i11iIiiIii
if 31 - 31: oO0o * I1Ii111 . OoOoOO00 * I11i
I1II1I = [ ]
ii = 0
OOOOO000oo0 = len ( inner_packet )
while ( ii < OOOOO000oo0 ) :
O0O = inner_packet [ ii : : ]
if ( len ( O0O ) > iiI1iiIiiiI1I ) : O0O = O0O [ 0 : iiI1iiIiiiI1I ]
I1II1I . append ( O0O )
ii += len ( O0O )
if 7 - 7: I11i + I11i + II111iiii % Ii1I
if 31 - 31: oO0o * OoOoOO00 + OOooOOo
if 58 - 58: o0oOOo0O0Ooo % I1IiiI . I1IiiI * OoO0O00 - IiII . OoooooooOO
if 10 - 10: I1Ii111
if 48 - 48: iII111i * i1IIi % OoooooooOO * Ii1I * OoO0O00
if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
I1IiiIi11 = [ ]
ii = 0
for O0O in I1II1I :
if 20 - 20: OOooOOo - iII111i / Oo0Ooo * OoO0O00
if 55 - 55: OoooooooOO
if 73 - 73: OoOoOO00 - I1ii11iIi11i % Oo0Ooo + I1ii11iIi11i - O0 . OoO0O00
if 38 - 38: O0
ooO = ii if ( O0O == I1II1I [ - 1 ] ) else 0x2000 + ii
ooO = socket . htons ( ooO )
outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , ooO ) + outer_hdr [ 8 : : ]
if 34 - 34: I1Ii111 * II111iiii
if 71 - 71: IiII
if 97 - 97: I1ii11iIi11i
if 86 - 86: Oo0Ooo - OOooOOo . OoOoOO00 . II111iiii * I1IiiI . II111iiii
II1Ooo0000o00OO = socket . htons ( len ( O0O ) + 20 )
outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , II1Ooo0000o00OO ) + outer_hdr [ 4 : : ]
outer_hdr = lisp_ip_checksum ( outer_hdr )
I1IiiIi11 . append ( outer_hdr + O0O )
ii += len ( O0O ) / 8
if 9 - 9: II111iiii * i11iIiiIii . OOooOOo - OoO0O00
return ( I1IiiIi11 )
if 31 - 31: i11iIiiIii * Ii1I . o0oOOo0O0Ooo % OOooOOo * I1ii11iIi11i % O0
if 77 - 77: OoO0O00 + OoO0O00 . ooOoO0o * OoooooooOO + OoO0O00
def fragment ( self ) :
oOo = self . fix_outer_header ( self . packet )
if 6 - 6: i1IIi - I11i
if 89 - 89: ooOoO0o - I11i . O0 % OoooooooOO . i11iIiiIii
if 35 - 35: II111iiii / OoOoOO00 - O0 . II111iiii
if 55 - 55: Oo0Ooo % i1IIi * I11i
if 95 - 95: OOooOOo / II111iiii - o0oOOo0O0Ooo % I1Ii111 . I11i
if 63 - 63: iIii1I11I1II1 / ooOoO0o
OOOOO000oo0 = len ( oOo )
if ( OOOOO000oo0 <= 1500 ) : return ( [ oOo ] , "Fragment-None" )
if 24 - 24: Oo0Ooo / iIii1I11I1II1 % OOooOOo * OoOoOO00 - iIii1I11I1II1
oOo = self . packet
if ( self . inner_version != 4 ) :
i1111I = random . randint ( 0 , 0xffff )
OoO00oo0 = oOo [ 0 : 4 ] + struct . pack ( "H" , i1111I ) + oOo [ 6 : 20 ]
oOOO = oOo [ 20 : : ]
I1IiiIi11 = self . fragment_outer ( OoO00oo0 , oOOO )
return ( I1IiiIi11 , "Fragment-Outer" )
Oo0o0oo0 = 56 if ( self . outer_version == 6 ) else 36
OoO00oo0 = oOo [ 0 : Oo0o0oo0 ]
oOOoOOooO0 = oOo [ Oo0o0oo0 : Oo0o0oo0 + 20 ]
oOOO = oOo [ Oo0o0oo0 + 20 : : ]
oOOO00Oo = struct . unpack ( "H" , oOOoOOooO0 [ 6 : 8 ] ) [ 0 ]
oOOO00Oo = socket . ntohs ( oOOO00Oo )
Ii1iii1 = os . getenv ( "LISP_IGNORE_DF_BIT" ) != None
if ( oOOO00Oo & 0x4000 ) :
if ( Ii1iii1 ) :
oOOO00Oo &= ~ 0x4000
else :
iii11III1I = bold ( "DF-bit set" , False )
dprint ( "{} in inner header, packet discarded" . format ( iii11III1I ) )
return ( [ ] , "Fragment-None-DF-bit" )
ii = 0
OOOOO000oo0 = len ( oOOO )
I1IiiIi11 = [ ]
while ( ii < OOOOO000oo0 ) :
I1IiiIi11 . append ( oOOO [ ii : ii + 1400 ] )
ii += 1400
I1II1I = I1IiiIi11
I1IiiIi11 = [ ]
i11i1IIIIII = True if oOOO00Oo & 0x2000 else False
oOOO00Oo = ( oOOO00Oo & 0x1fff ) * 8
for O0O in I1II1I :
O0iI1I1ii11IIi1 = oOOO00Oo / 8
if ( i11i1IIIIII ) :
O0iI1I1ii11IIi1 |= 0x2000
elif ( O0O != I1II1I [ - 1 ] ) :
O0iI1I1ii11IIi1 |= 0x2000
if 100 - 100: Oo0Ooo . Ii1I . I1IiiI % II111iiii - oO0o
O0iI1I1ii11IIi1 = socket . htons ( O0iI1I1ii11IIi1 )
oOOoOOooO0 = oOOoOOooO0 [ 0 : 6 ] + struct . pack ( "H" , O0iI1I1ii11IIi1 ) + oOOoOOooO0 [ 8 : : ]
OOOOO000oo0 = len ( O0O )
oOOO00Oo += OOOOO000oo0
II1Ooo0000o00OO = socket . htons ( OOOOO000oo0 + 20 )
oOOoOOooO0 = oOOoOOooO0 [ 0 : 2 ] + struct . pack ( "H" , II1Ooo0000o00OO ) + oOOoOOooO0 [ 4 : 10 ] + struct . pack ( "H" , 0 ) + oOOoOOooO0 [ 12 : : ]
if 39 - 39: OoOoOO00 - OoooooooOO - i1IIi / II111iiii
oOOoOOooO0 = lisp_ip_checksum ( oOOoOOooO0 )
IIIii1 = oOOoOOooO0 + O0O
OOOOO000oo0 = len ( IIIii1 )
if ( self . outer_version == 4 ) :
II1Ooo0000o00OO = OOOOO000oo0 + Oo0o0oo0
OOOOO000oo0 += 16
OoO00oo0 = OoO00oo0 [ 0 : 2 ] + struct . pack ( "H" , II1Ooo0000o00OO ) + OoO00oo0 [ 4 : : ]
if 31 - 31: OoooooooOO - oO0o / I1Ii111
OoO00oo0 = lisp_ip_checksum ( OoO00oo0 )
IIIii1 = OoO00oo0 + IIIii1
IIIii1 = self . fix_outer_header ( IIIii1 )
Ii1iii11I = Oo0o0oo0 - 12
II1Ooo0000o00OO = socket . htons ( OOOOO000oo0 )
IIIii1 = IIIii1 [ 0 : Ii1iii11I ] + struct . pack ( "H" , II1Ooo0000o00OO ) + IIIii1 [ Ii1iii11I + 2 : : ]
if 2 - 2: OoooooooOO - Ii1I % oO0o / I1IiiI / o0oOOo0O0Ooo
I1IiiIi11 . append ( IIIii1 )
if 3 - 3: II111iiii / OOooOOo
return ( I1IiiIi11 , "Fragment-Inner" )
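# fix_outer_header(): byte-swap the IPv4 total-length field (and, on macOS,
# the fragment-offset field) before handing the packet to a raw socket. This
# appears to compensate for the host-byte-order quirks of IP_HDRINCL raw
# sockets on BSD-derived systems.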
def fix_outer_header ( self , packet ) :
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
if 18 - 18: I11i + Oo0Ooo - OoO0O00 / I1Ii111 / OOooOOo
else :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
return ( packet )
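# send_packet(): fragment the encapsulated packet if necessary, log each
# fragment, and send every fragment to the destination RLOC over the supplied
# raw socket, optionally recording the flow.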
def send_packet ( self , lisp_raw_socket , dest ) :
if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
if 36 - 36: oO0o / II111iiii * IiII % I1ii11iIi11i
dest = dest . print_address_no_iid ( )
I1IiiIi11 , IiIIii = self . fragment ( )
if 74 - 74: iIii1I11I1II1 / Ii1I
for IIIii1 in I1IiiIi11 :
if ( len ( I1IiiIi11 ) != 1 ) :
self . packet = IIIii1
self . print_packet ( IiIIii , True )
try : lisp_raw_socket . sendto ( IIIii1 , ( dest , 0 ) )
except socket . error , Oo0ooo0Ooo :
lprint ( "socket.sendto() failed: {}" . format ( Oo0ooo0Ooo ) )
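# send_l2_packet(): prepend a prebuilt MAC header and write the frame to a
# layer-2 (tap/bridge) socket; used to deliver IPv6 packets locally. The
# packet is dropped when either the socket or the MAC header is missing.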
def send_l2_packet ( self , l2_socket , mac_header ) :
if ( l2_socket == None ) :
lprint ( "No layer-2 socket, drop IPv6 packet" )
return
if 53 - 53: I1IiiI
if ( mac_header == None ) :
lprint ( "Could not build MAC header, drop IPv6 packet" )
return
oOo = mac_header + self . packet
l2_socket . write ( oOo )
return
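# bridge_l2_packet(): look up the dynamic-EID entry for the destination EID
# and, when a bridge socket exists for its interface, forward the raw frame
# out that interface.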
def bridge_l2_packet ( self , eid , db ) :
try : oOOo0oO = db . dynamic_eids [ eid . print_address_no_iid ( ) ]
except : return
try : II111IiiiI1 = lisp_myinterfaces [ oOOo0oO . interface ]
except : return
try :
socket = II111IiiiI1 . get_bridge_socket ( )
if ( socket == None ) : return
except : return
if 19 - 19: iII111i
try : socket . send ( self . packet )
except socket . error , Oo0ooo0Ooo :
lprint ( "bridge_l2_packet(): socket.send() failed: {}" . format ( Oo0ooo0Ooo ) )
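# is_lisp_packet(): return True when the IPv4 packet is UDP and either the
# source or destination UDP port is the LISP data port.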
def is_lisp_packet ( self , packet ) :
OOOOo00oo00O = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == LISP_UDP_PROTOCOL )
if ( OOOOo00oo00O == False ) : return ( False )
if 46 - 46: i11iIiiIii
Iiiii = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
if ( socket . ntohs ( Iiiii ) == LISP_DATA_PORT ) : return ( True )
Iiiii = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
if ( socket . ntohs ( Iiiii ) == LISP_DATA_PORT ) : return ( True )
return ( False )
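# decode(): parse a received packet in place. For LISP-encapsulated packets,
# validate and strip the outer IPv4/IPv6, UDP and LISP data headers, decrypt
# the payload when the key-id bits are set, feed echo-nonce state, and then
# parse the inner IPv4/IPv6 (or MAC) header into the inner_* fields. Returns
# self on success or None on error, bumping the matching stats counter.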
def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :
self . packet_error = ""
oOo = self . packet
iI = len ( oOo )
OO0O = I11IiiiII = True
I1iiII = 0
II1 = 0
if ( is_lisp_packet ) :
II1 = self . lisp_header . get_instance_id ( )
oOOOO0 = struct . unpack ( "B" , oOo [ 0 : 1 ] ) [ 0 ]
self . outer_version = oOOOO0 >> 4
if ( self . outer_version == 4 ) :
o00o0 = struct . unpack ( "H" , oOo [ 10 : 12 ] ) [ 0 ]
oOo = lisp_ip_checksum ( oOo )
I11i11I = struct . unpack ( "H" , oOo [ 10 : 12 ] ) [ 0 ]
if ( I11i11I != 0 ) :
if ( o00o0 != 0 or lisp_is_macos ( ) == False ) :
self . packet_error = "checksum-error"
if ( stats ) :
stats [ self . packet_error ] . increment ( iI )
lprint ( "IPv4 header checksum failed for outer header" )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
o0o0O00oOo = LISP_AFI_IPV4
ii = 12
self . outer_tos = struct . unpack ( "B" , oOo [ 1 : 2 ] ) [ 0 ]
self . outer_ttl = struct . unpack ( "B" , oOo [ 8 : 9 ] ) [ 0 ]
I1iiII = 20
elif ( self . outer_version == 6 ) :
o0o0O00oOo = LISP_AFI_IPV6
ii = 8
iI1ii = struct . unpack ( "H" , oOo [ 0 : 2 ] ) [ 0 ]
self . outer_tos = ( socket . ntohs ( iI1ii ) >> 4 ) & 0xff
self . outer_ttl = struct . unpack ( "B" , oOo [ 7 : 8 ] ) [ 0 ]
I1iiII = 40
else :
self . packet_error = "outer-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( iI )
lprint ( "Cannot decode outer header" )
return ( None )
self . outer_source . afi = o0o0O00oOo
self . outer_dest . afi = o0o0O00oOo
ii11ii11II = self . outer_source . addr_length ( )
if 35 - 35: Oo0Ooo * II111iiii
self . outer_source . unpack_address ( oOo [ ii : ii + ii11ii11II ] )
ii += ii11ii11II
self . outer_dest . unpack_address ( oOo [ ii : ii + ii11ii11II ] )
oOo = oOo [ I1iiII : : ]
self . outer_source . mask_len = self . outer_source . host_mask_len ( )
self . outer_dest . mask_len = self . outer_dest . host_mask_len ( )
IIi11 = struct . unpack ( "H" , oOo [ 0 : 2 ] ) [ 0 ]
self . udp_sport = socket . ntohs ( IIi11 )
IIi11 = struct . unpack ( "H" , oOo [ 2 : 4 ] ) [ 0 ]
self . udp_dport = socket . ntohs ( IIi11 )
IIi11 = struct . unpack ( "H" , oOo [ 4 : 6 ] ) [ 0 ]
self . udp_length = socket . ntohs ( IIi11 )
IIi11 = struct . unpack ( "H" , oOo [ 6 : 8 ] ) [ 0 ]
self . udp_checksum = socket . ntohs ( IIi11 )
oOo = oOo [ 8 : : ]
OO0O = ( self . udp_dport == LISP_DATA_PORT or
self . udp_sport == LISP_DATA_PORT )
I11IiiiII = ( self . udp_dport in ( LISP_L2_DATA_PORT , LISP_VXLAN_DATA_PORT ) )
if ( self . lisp_header . decode ( oOo ) == False ) :
self . packet_error = "lisp-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( iI )
if 98 - 98: oO0o . I1Ii111 / OoOoOO00 . ooOoO0o
if ( lisp_flow_logging ) : self . log_flow ( False )
lprint ( "Cannot decode LISP header" )
return ( None )
if 1 - 1: OOooOOo
oOo = oOo [ 8 : : ]
II1 = self . lisp_header . get_instance_id ( )
I1iiII += 16
if 87 - 87: O0 * II111iiii + iIii1I11I1II1 % oO0o % i11iIiiIii - OoOoOO00
if ( II1 == 0xffffff ) : II1 = 0
iiI11 = False
OoooOOo0oOO = self . lisp_header . k_bits
if ( OoooOOo0oOO ) :
ooOOo0o = lisp_get_crypto_decap_lookup_key ( self . outer_source ,
self . udp_sport )
if ( ooOOo0o == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( iI )
if 44 - 44: OOooOOo % iIii1I11I1II1
self . print_packet ( "Receive" , is_lisp_packet )
iiiiIi111 = bold ( "No key available" , False )
dprint ( "{} for key-id {} to decrypt packet" . format ( iiiiIi111 , OoooOOo0oOO ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
Iiii11 = lisp_crypto_keys_by_rloc_decap [ ooOOo0o ] [ OoooOOo0oOO ]
if ( Iiii11 == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( iI )
if 65 - 65: I1Ii111 + iII111i * iII111i
self . print_packet ( "Receive" , is_lisp_packet )
iiiiIi111 = bold ( "No key available" , False )
dprint ( "{} to decrypt packet from RLOC {}" . format ( iiiiIi111 ,
red ( ooOOo0o , False ) ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
Iiii11 . use_count += 1
oOo , iiI11 = self . decrypt ( oOo , I1iiII , Iiii11 ,
ooOOo0o )
if ( iiI11 == False ) :
if ( stats ) : stats [ self . packet_error ] . increment ( iI )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
oOOOO0 = struct . unpack ( "B" , oOo [ 0 : 1 ] ) [ 0 ]
self . inner_version = oOOOO0 >> 4
if ( OO0O and self . inner_version == 4 and oOOOO0 >= 0x45 ) :
IIi1IiiIi1III = socket . ntohs ( struct . unpack ( "H" , oOo [ 2 : 4 ] ) [ 0 ] )
self . inner_tos = struct . unpack ( "B" , oOo [ 1 : 2 ] ) [ 0 ]
self . inner_ttl = struct . unpack ( "B" , oOo [ 8 : 9 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , oOo [ 9 : 10 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV4
self . inner_dest . afi = LISP_AFI_IPV4
self . inner_source . unpack_address ( oOo [ 12 : 16 ] )
self . inner_dest . unpack_address ( oOo [ 16 : 20 ] )
oOOO00Oo = socket . ntohs ( struct . unpack ( "H" , oOo [ 6 : 8 ] ) [ 0 ] )
self . inner_is_fragment = ( oOOO00Oo & 0x2000 or oOOO00Oo != 0 )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , oOo [ 20 : 22 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , oOo [ 22 : 24 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
if 19 - 19: i1IIi % I1IiiI - iIii1I11I1II1 - oO0o / I1ii11iIi11i
elif ( OO0O and self . inner_version == 6 and oOOOO0 >= 0x60 ) :
IIi1IiiIi1III = socket . ntohs ( struct . unpack ( "H" , oOo [ 4 : 6 ] ) [ 0 ] ) + 40
iI1ii = struct . unpack ( "H" , oOo [ 0 : 2 ] ) [ 0 ]
self . inner_tos = ( socket . ntohs ( iI1ii ) >> 4 ) & 0xff
self . inner_ttl = struct . unpack ( "B" , oOo [ 7 : 8 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , oOo [ 6 : 7 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV6
self . inner_dest . afi = LISP_AFI_IPV6
self . inner_source . unpack_address ( oOo [ 8 : 24 ] )
self . inner_dest . unpack_address ( oOo [ 24 : 40 ] )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , oOo [ 40 : 42 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , oOo [ 42 : 44 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
if 16 - 16: Ii1I
elif ( I11IiiiII ) :
IIi1IiiIi1III = len ( oOo )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_source . afi = LISP_AFI_MAC
self . inner_dest . afi = LISP_AFI_MAC
self . inner_dest . unpack_address ( self . swap_mac ( oOo [ 0 : 6 ] ) )
self . inner_source . unpack_address ( self . swap_mac ( oOo [ 6 : 12 ] ) )
elif ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( self )
else :
self . packet_error = "bad-inner-version"
if ( stats ) : stats [ self . packet_error ] . increment ( iI )
if 79 - 79: OoooooooOO - ooOoO0o * Ii1I - II111iiii % OoOoOO00 * IiII
lprint ( "Cannot decode encapsulation, header version {}" . format ( hex ( oOOOO0 ) ) )
if 31 - 31: I1IiiI
oOo = lisp_format_packet ( oOo [ 0 : 20 ] )
lprint ( "Packet header: {}" . format ( oOo ) )
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( None )
if 36 - 36: OoO0O00 + OoO0O00 + OoO0O00 % Oo0Ooo * iII111i
self . inner_source . mask_len = self . inner_source . host_mask_len ( )
self . inner_dest . mask_len = self . inner_dest . host_mask_len ( )
self . inner_source . instance_id = II1
self . inner_dest . instance_id = II1
if ( lisp_nonce_echoing and is_lisp_packet ) :
IiIii1i11i1 = lisp_get_echo_nonce ( self . outer_source , None )
if ( IiIii1i11i1 == None ) :
ooOOo00o0ooO = self . outer_source . print_address_no_iid ( )
IiIii1i11i1 = lisp_echo_nonce ( ooOOo00o0ooO )
if 40 - 40: o0oOOo0O0Ooo . o0oOOo0O0Ooo * i11iIiiIii
i11III1I = self . lisp_header . get_nonce ( )
if ( self . lisp_header . is_e_bit_set ( ) ) :
IiIii1i11i1 . receive_request ( lisp_ipc_socket , i11III1I )
elif ( IiIii1i11i1 . request_nonce_sent ) :
IiIii1i11i1 . receive_echo ( lisp_ipc_socket , i11III1I )
if ( iiI11 ) : self . packet += oOo [ : IIi1IiiIi1III ]
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( self )
def swap_mac ( self , mac ) :
return ( mac [ 1 ] + mac [ 0 ] + mac [ 3 ] + mac [ 2 ] + mac [ 5 ] + mac [ 4 ] )
def strip_outer_headers ( self ) :
ii = 16
ii += 20 if ( self . outer_version == 4 ) else 40
self . packet = self . packet [ ii : : ]
return ( self )
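# hash_ports(): fold the inner protocol number and, for TCP/UDP, the inner
# port pair into a 16-bit value used as part of the load-balancing hash.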
def hash_ports ( self ) :
oOo = self . packet
oOOOO0 = self . inner_version
ooo000 = 0
if ( oOOOO0 == 4 ) :
oooOoO0oo0o0 = struct . unpack ( "B" , oOo [ 9 ] ) [ 0 ]
if ( self . inner_is_fragment ) : return ( oooOoO0oo0o0 )
if ( oooOoO0oo0o0 in [ 6 , 17 ] ) :
ooo000 = oooOoO0oo0o0
ooo000 += struct . unpack ( "I" , oOo [ 20 : 24 ] ) [ 0 ]
ooo000 = ( ooo000 >> 16 ) ^ ( ooo000 & 0xffff )
if ( oOOOO0 == 6 ) :
oooOoO0oo0o0 = struct . unpack ( "B" , oOo [ 6 ] ) [ 0 ]
if ( oooOoO0oo0o0 in [ 6 , 17 ] ) :
ooo000 = oooOoO0oo0o0
ooo000 += struct . unpack ( "I" , oOo [ 40 : 44 ] ) [ 0 ]
ooo000 = ( ooo000 >> 16 ) ^ ( ooo000 & 0xffff )
return ( ooo000 )
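# hash_packet(): XOR the inner source/destination addresses with the port
# hash and fold the result down to 12 bits, which are placed in a 0xfxxx
# source UDP port so ECMP paths see per-flow entropy in the encapsulation.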
def hash_packet ( self ) :
ooo000 = self . inner_source . address ^ self . inner_dest . address
ooo000 += self . hash_ports ( )
if ( self . inner_version == 4 ) :
ooo000 = ( ooo000 >> 16 ) ^ ( ooo000 & 0xffff )
elif ( self . inner_version == 6 ) :
ooo000 = ( ooo000 >> 64 ) ^ ( ooo000 & 0xffffffffffffffff )
ooo000 = ( ooo000 >> 32 ) ^ ( ooo000 & 0xffffffff )
ooo000 = ( ooo000 >> 16 ) ^ ( ooo000 & 0xffff )
if 80 - 80: I1IiiI
self . udp_sport = 0xf000 | ( ooo000 & 0xfff )
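# print_packet(): debug-logging helper; prints an inner-header summary for
# native packets, or the outer RLOC pair, UDP ports, LISP header flags and
# inner EID pair for encapsulated packets.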
def print_packet ( self , s_or_r , is_lisp_packet ) :
if ( is_lisp_packet == False ) :
III1IIi = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
self . inner_dest . print_address ( ) )
dprint ( ( "{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..." ) . format ( bold ( s_or_r , False ) ,
green ( III1IIi , False ) , self . inner_tos ,
self . inner_ttl , len ( self . packet ) ,
lisp_format_packet ( self . packet [ 0 : 60 ] ) ) )
return
if ( s_or_r . find ( "Receive" ) != - 1 ) :
IIi1I1 = "decap"
IIi1I1 += "-vxlan" if self . udp_dport == LISP_VXLAN_DATA_PORT else ""
else :
IIi1I1 = s_or_r
if ( IIi1I1 in [ "Send" , "Replicate" ] or IIi1I1 . find ( "Fragment" ) != - 1 ) :
IIi1I1 = "encap"
Oo0O0o00o00 = "{} -> {}" . format ( self . outer_source . print_address_no_iid ( ) ,
self . outer_dest . print_address_no_iid ( ) )
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
IIIIIiI11Ii = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, " )
if 68 - 68: iIii1I11I1II1 . iIii1I11I1II1 / OoOoOO00 - II111iiii - iIii1I11I1II1
IIIIIiI11Ii += bold ( "control-packet" , False ) + ": {} ..."
if 75 - 75: ooOoO0o . I1IiiI * II111iiii
dprint ( IIIIIiI11Ii . format ( bold ( s_or_r , False ) , red ( Oo0O0o00o00 , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport ,
self . udp_dport , lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
return
else :
IIIIIiI11Ii = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ..." )
if ( self . lisp_header . k_bits ) :
if ( IIi1I1 == "encap" ) : IIi1I1 = "encrypt/encap"
if ( IIi1I1 == "decap" ) : IIi1I1 = "decap/decrypt"
III1IIi = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
self . inner_dest . print_address ( ) )
if 37 - 37: OoO0O00 * I11i + Ii1I + I1ii11iIi11i * o0oOOo0O0Ooo
dprint ( IIIIIiI11Ii . format ( bold ( s_or_r , False ) , red ( Oo0O0o00o00 , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport , self . udp_dport ,
green ( III1IIi , False ) , self . inner_tos , self . inner_ttl ,
len ( self . packet ) , self . lisp_header . print_header ( IIi1I1 ) ,
lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . inner_source , self . inner_dest ) )
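# get_raw_socket(): return the interface-bound socket for this packet's
# instance-id, used for multi-tenancy. Returns None for instance-id 0, for
# unknown instance-ids, or when no SO_BINDTODEVICE socket is available.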
def get_raw_socket ( self ) :
II1 = str ( self . lisp_header . get_instance_id ( ) )
if ( II1 == "0" ) : return ( None )
if ( lisp_iid_to_interface . has_key ( II1 ) == False ) : return ( None )
if 85 - 85: oO0o + iII111i % iII111i / I11i . I1IiiI - OoOoOO00
II111IiiiI1 = lisp_iid_to_interface [ II1 ]
o00oOOO = II111IiiiI1 . get_socket ( )
if ( o00oOOO == None ) :
O0I11IIIII = bold ( "SO_BINDTODEVICE" , False )
i1I11 = ( os . getenv ( "LISP_ENFORCE_BINDTODEVICE" ) != None )
lprint ( "{} required for multi-tenancy support, {} packet" . format ( O0I11IIIII , "drop" if i1I11 else "forward" ) )
if 76 - 76: iIii1I11I1II1 / I1Ii111 - I1ii11iIi11i % o0oOOo0O0Ooo % OOooOOo + OoooooooOO
if ( i1I11 ) : return ( None )
II1 = bold ( II1 , False )
i1 = bold ( II111IiiiI1 . device , False )
dprint ( "Send packet on instance-id {} interface {}" . format ( II1 , i1 ) )
return ( o00oOOO )
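# log_flow(): buffer (timestamp, direction, packet) records and flush them to
# the flow log on a worker thread when the buffer fills or when a ./log-flows
# trigger file exists.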
def log_flow ( self , encap ) :
global lisp_flow_log
if 63 - 63: I11i * ooOoO0o % II111iiii % I1Ii111 + I1IiiI * Oo0Ooo
o0oOo00OOo0O = os . path . exists ( "./log-flows" )
if ( len ( lisp_flow_log ) == LISP_FLOW_LOG_SIZE or o0oOo00OOo0O ) :
OO0OOoOOO = [ lisp_flow_log ]
lisp_flow_log = [ ]
threading . Thread ( target = lisp_write_flow_log , args = OO0OOoOOO ) . start ( )
if ( o0oOo00OOo0O ) : os . system ( "rm ./log-flows" )
return
OOOO0O00o = datetime . datetime . now ( )
lisp_flow_log . append ( [ OOOO0O00o , encap , self . packet , self ] )
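# print_flow(): format one buffered flow record: outer RLOCs and UDP ports
# when present, inner EIDs, length/tos/ttl/protocol, TCP/UDP ports or ICMP
# sequence number when recognizable, and any recorded packet error.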
def print_flow ( self , ts , encap , packet ) :
ts = ts . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
Oo0OO0 = "{}: {}" . format ( ts , "encap" if encap else "decap" )
if 74 - 74: Ii1I - OoooooooOO
Iii1I1I = red ( self . outer_source . print_address_no_iid ( ) , False )
IIi = red ( self . outer_dest . print_address_no_iid ( ) , False )
IIiIi1II1IiI = green ( self . inner_source . print_address ( ) , False )
oo0OoO = green ( self . inner_dest . print_address ( ) , False )
if 3 - 3: IiII - OoooooooOO * OoooooooOO - I1IiiI / I1Ii111 * I1ii11iIi11i
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
Oo0OO0 += " {}:{} -> {}:{}, LISP control message type {}\n"
Oo0OO0 = Oo0OO0 . format ( Iii1I1I , self . udp_sport , IIi , self . udp_dport ,
self . inner_version )
return ( Oo0OO0 )
if ( self . outer_dest . is_null ( ) == False ) :
Oo0OO0 += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
Oo0OO0 = Oo0OO0 . format ( Iii1I1I , self . udp_sport , IIi , self . udp_dport ,
len ( packet ) , self . outer_tos , self . outer_ttl )
if ( self . lisp_header . k_bits != 0 ) :
iiIi1111Ii1 = "\n"
if ( self . packet_error != "" ) :
iiIi1111Ii1 = " ({})" . format ( self . packet_error ) + iiIi1111Ii1
if 31 - 31: o0oOOo0O0Ooo * I11i - i11iIiiIii - I1IiiI
Oo0OO0 += ", encrypted" + iiIi1111Ii1
return ( Oo0OO0 )
if ( self . outer_dest . is_null ( ) == False ) :
packet = packet [ 36 : : ] if self . outer_version == 4 else packet [ 56 : : ]
oooOoO0oo0o0 = packet [ 9 ] if self . inner_version == 4 else packet [ 6 ]
oooOoO0oo0o0 = struct . unpack ( "B" , oooOoO0oo0o0 ) [ 0 ]
if 94 - 94: II111iiii - iIii1I11I1II1 - iIii1I11I1II1
Oo0OO0 += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
Oo0OO0 = Oo0OO0 . format ( IIiIi1II1IiI , oo0OoO , len ( packet ) , self . inner_tos ,
self . inner_ttl , oooOoO0oo0o0 )
if ( oooOoO0oo0o0 in [ 6 , 17 ] ) :
o0OoOOoooooOO = packet [ 20 : 24 ] if self . inner_version == 4 else packet [ 40 : 44 ]
if ( len ( o0OoOOoooooOO ) == 4 ) :
o0OoOOoooooOO = socket . ntohl ( struct . unpack ( "I" , o0OoOOoooooOO ) [ 0 ] )
Oo0OO0 += ", ports {} -> {}" . format ( o0OoOOoooooOO >> 16 , o0OoOOoooooOO & 0xffff )
if 88 - 88: i1IIi
elif ( oooOoO0oo0o0 == 1 ) :
O0o = packet [ 26 : 28 ] if self . inner_version == 4 else packet [ 46 : 48 ]
if ( len ( O0o ) == 2 ) :
O0o = socket . ntohs ( struct . unpack ( "H" , O0o ) [ 0 ] )
Oo0OO0 += ", icmp-seq {}" . format ( O0o )
if ( self . packet_error != "" ) :
Oo0OO0 += " ({})" . format ( self . packet_error )
if 39 - 39: Ii1I % i1IIi . I1ii11iIi11i - O0
Oo0OO0 += "\n"
return ( Oo0OO0 )
def is_trace ( self ) :
o0OoOOoooooOO = [ self . inner_sport , self . inner_dport ]
return ( self . inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in o0OoOOoooooOO )
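# LISP data-header flag bits for the first 32-bit word of the 8-byte header.
# These appear to follow RFC 6830 (N nonce-present, L locator-status-bits,
# E echo-nonce-request, V map-version, I instance-id), with a P bit and the
# two low-order K (key-id) bits used by lisp-crypto in the remaining flag
# positions.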
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
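# lisp_data_header: encode/decode helper for the 8-byte LISP data header: a
# flags + nonce/map-version word followed by an instance-id + locator-status
# word, with setters for the individual fields.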
class lisp_data_header ( ) :
def __init__ ( self ) :
self . first_long = 0
self . second_long = 0
self . k_bits = 0
def print_header ( self , e_or_d ) :
O0oooOO = lisp_hex_string ( self . first_long & 0xffffff )
IIiIi1I1iI1 = lisp_hex_string ( self . second_long ) . zfill ( 8 )
if 39 - 39: OOooOOo
IIIIIiI11Ii = ( "{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + "iid/lsb: {}" )
if 70 - 70: IiII % OoO0O00 % I1IiiI
return ( IIIIIiI11Ii . format ( bold ( e_or_d , False ) ,
"N" if ( self . first_long & LISP_N_BIT ) else "n" ,
"L" if ( self . first_long & LISP_L_BIT ) else "l" ,
"E" if ( self . first_long & LISP_E_BIT ) else "e" ,
"V" if ( self . first_long & LISP_V_BIT ) else "v" ,
"I" if ( self . first_long & LISP_I_BIT ) else "i" ,
"P" if ( self . first_long & LISP_P_BIT ) else "p" ,
"K" if ( self . k_bits in [ 2 , 3 ] ) else "k" ,
"K" if ( self . k_bits in [ 1 , 3 ] ) else "k" ,
O0oooOO , IIiIi1I1iI1 ) )
def encode ( self ) :
IIiI1I11ii1i = "II"
O0oooOO = socket . htonl ( self . first_long )
IIiIi1I1iI1 = socket . htonl ( self . second_long )
if 75 - 75: O0
oooooOOo0Oo = struct . pack ( IIiI1I11ii1i , O0oooOO , IIiIi1I1iI1 )
return ( oooooOOo0Oo )
def decode ( self , packet ) :
IIiI1I11ii1i = "II"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( False )
if 62 - 62: Ii1I . i11iIiiIii % O0 % I1Ii111 - Oo0Ooo
O0oooOO , IIiIi1I1iI1 = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
self . first_long = socket . ntohl ( O0oooOO )
self . second_long = socket . ntohl ( IIiIi1I1iI1 )
self . k_bits = ( self . first_long & LISP_K_BITS ) >> 24
return ( True )
def key_id ( self , key_id ) :
self . first_long &= ~ ( 0x3 << 24 )
self . first_long |= ( ( key_id & 0x3 ) << 24 )
self . k_bits = key_id
def nonce ( self , nonce ) :
self . first_long |= LISP_N_BIT
self . first_long |= nonce
def map_version ( self , version ) :
self . first_long |= LISP_V_BIT
self . first_long |= version
def instance_id ( self , iid ) :
if ( iid == 0 ) : return
self . first_long |= LISP_I_BIT
self . second_long &= 0xff
self . second_long |= ( iid << 8 )
def get_instance_id ( self ) :
return ( ( self . second_long >> 8 ) & 0xffffff )
def locator_status_bits ( self , lsbs ) :
self . first_long |= LISP_L_BIT
self . second_long &= 0xffffff00
self . second_long |= ( lsbs & 0xff )
def is_request_nonce ( self , nonce ) :
return ( nonce & 0x80000000 )
def request_nonce ( self , nonce ) :
self . first_long |= LISP_E_BIT
self . first_long |= LISP_N_BIT
self . first_long |= ( nonce & 0xffffff )
def is_e_bit_set ( self ) :
return ( self . first_long & LISP_E_BIT )
def get_nonce ( self ) :
return ( self . first_long & 0xffffff )
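# lisp_echo_nonce: per-RLOC state for the echo-nonce reachability mechanism.
# A request-nonce is placed in encapsulated packets and the peer is expected
# to echo it back; the sent/received timestamps kept here drive the RLOC
# up/down decisions in change_state().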
class lisp_echo_nonce ( ) :
def __init__ ( self , rloc_str ) :
self . rloc_str = rloc_str
self . rloc = lisp_address ( LISP_AFI_NONE , rloc_str , 0 , 0 )
self . request_nonce_sent = None
self . echo_nonce_sent = None
self . last_request_nonce_sent = None
self . last_new_request_nonce_sent = None
self . last_echo_nonce_sent = None
self . last_new_echo_nonce_sent = None
self . request_nonce_rcvd = None
self . echo_nonce_rcvd = None
self . last_request_nonce_rcvd = None
self . last_echo_nonce_rcvd = None
self . last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list [ rloc_str ] = self
def send_ipc ( self , ipc_socket , ipc ) :
oo = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
iIi11i1I11Ii = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc ( ipc , oo )
lisp_ipc ( ipc , ipc_socket , iIi11i1I11Ii )
def send_request_ipc ( self , ipc_socket , nonce ) :
nonce = lisp_hex_string ( nonce )
oOooOOoo = "nonce%R%{}%{}" . format ( self . rloc_str , nonce )
self . send_ipc ( ipc_socket , oOooOOoo )
def send_echo_ipc ( self , ipc_socket , nonce ) :
nonce = lisp_hex_string ( nonce )
oOooOOoo = "nonce%E%{}%{}" . format ( self . rloc_str , nonce )
self . send_ipc ( ipc_socket , oOooOOoo )
def receive_request ( self , ipc_socket , nonce ) :
III11II111 = self . request_nonce_rcvd
self . request_nonce_rcvd = nonce
self . last_request_nonce_rcvd = lisp_get_timestamp ( )
if ( lisp_i_am_rtr ) : return
if ( III11II111 != nonce ) : self . send_request_ipc ( ipc_socket , nonce )
def receive_echo ( self , ipc_socket , nonce ) :
if ( self . request_nonce_sent != nonce ) : return
self . last_echo_nonce_rcvd = lisp_get_timestamp ( )
if ( self . echo_nonce_rcvd == nonce ) : return
if 28 - 28: OoO0O00
self . echo_nonce_rcvd = nonce
if ( lisp_i_am_rtr ) : return
self . send_echo_ipc ( ipc_socket , nonce )
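# get_request_or_echo_nonce(): decide, per packet, whether to echo a pending
# nonce back, start or continue request-nonce mode (request nonces are
# returned with the high-order bit set), or send no nonce. When both sides
# request at once, the RLOC address comparison breaks the tie.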
def get_request_or_echo_nonce ( self , ipc_socket , remote_rloc ) :
if ( self . request_nonce_sent and self . echo_nonce_sent and remote_rloc ) :
OoOOOO00 = lisp_myrlocs [ 0 ] if remote_rloc . is_ipv4 ( ) else lisp_myrlocs [ 1 ]
if ( remote_rloc . address > OoOOOO00 . address ) :
ii1iI1iI1 = "exit"
self . request_nonce_sent = None
else :
ii1iI1iI1 = "stay in"
self . echo_nonce_sent = None
iI1I1iII1iII = bold ( "collision" , False )
II1Ooo0000o00OO = red ( OoOOOO00 . print_address_no_iid ( ) , False )
Oo0O = red ( remote_rloc . print_address_no_iid ( ) , False )
lprint ( "Echo nonce {}, {} -> {}, {} request-nonce mode" . format ( iI1I1iII1iII ,
II1Ooo0000o00OO , Oo0O , ii1iI1iI1 ) )
if ( self . echo_nonce_sent != None ) :
i11III1I = self . echo_nonce_sent
Oo0ooo0Ooo = bold ( "Echoing" , False )
lprint ( "{} nonce 0x{} to {}" . format ( Oo0ooo0Ooo ,
lisp_hex_string ( i11III1I ) , red ( self . rloc_str , False ) ) )
self . last_echo_nonce_sent = lisp_get_timestamp ( )
self . echo_nonce_sent = None
return ( i11III1I )
i11III1I = self . request_nonce_sent
i1OooO00oO00o = self . last_request_nonce_sent
if ( i11III1I and i1OooO00oO00o != None ) :
if ( time . time ( ) - i1OooO00oO00o >= LISP_NONCE_ECHO_INTERVAL ) :
self . request_nonce_sent = None
lprint ( "Stop request-nonce mode for {}, nonce 0x{}" . format ( red ( self . rloc_str , False ) , lisp_hex_string ( i11III1I ) ) )
if 14 - 14: I1ii11iIi11i * Oo0Ooo + i11iIiiIii % OOooOOo - oO0o
return ( None )
if ( i11III1I == None ) :
i11III1I = lisp_get_data_nonce ( )
if ( self . recently_requested ( ) ) : return ( i11III1I )
if 44 - 44: i11iIiiIii % I1Ii111 % oO0o + I11i * oO0o . Ii1I
self . request_nonce_sent = i11III1I
lprint ( "Start request-nonce mode for {}, nonce 0x{}" . format ( red ( self . rloc_str , False ) , lisp_hex_string ( i11III1I ) ) )
if 89 - 89: OoooooooOO % II111iiii - OoO0O00 % i11iIiiIii
self . last_new_request_nonce_sent = lisp_get_timestamp ( )
if ( lisp_i_am_itr == False ) : return ( i11III1I | 0x80000000 )
self . send_request_ipc ( ipc_socket , i11III1I )
else :
lprint ( "Continue request-nonce mode for {}, nonce 0x{}" . format ( red ( self . rloc_str , False ) , lisp_hex_string ( i11III1I ) ) )
self . last_request_nonce_sent = lisp_get_timestamp ( )
return ( i11III1I | 0x80000000 )
def request_nonce_timeout ( self ) :
if ( self . request_nonce_sent == None ) : return ( False )
if ( self . request_nonce_sent == self . echo_nonce_rcvd ) : return ( False )
if 43 - 43: I1ii11iIi11i - II111iiii
iIIiI1iiI = time . time ( ) - self . last_request_nonce_sent
OOo = self . last_echo_nonce_rcvd
return ( iIIiI1iiI >= LISP_NONCE_ECHO_INTERVAL and OOo == None )
def recently_requested ( self ) :
OOo = self . last_request_nonce_sent
if ( OOo == None ) : return ( False )
if 22 - 22: O0
iIIiI1iiI = time . time ( ) - OOo
return ( iIIiI1iiI <= LISP_NONCE_ECHO_INTERVAL )
def recently_echoed ( self ) :
if ( self . request_nonce_sent == None ) : return ( True )
OOo = self . last_good_echo_nonce_rcvd
if ( OOo == None ) : OOo = 0
iIIiI1iiI = time . time ( ) - OOo
if ( iIIiI1iiI <= LISP_NONCE_ECHO_INTERVAL ) : return ( True )
OOo = self . last_new_request_nonce_sent
if ( OOo == None ) : OOo = 0
iIIiI1iiI = time . time ( ) - OOo
return ( iIIiI1iiI <= LISP_NONCE_ECHO_INTERVAL )
def change_state ( self , rloc ) :
if ( rloc . up_state ( ) and self . recently_echoed ( ) == False ) :
i1IiIiiiii11 = bold ( "down" , False )
oooo = lisp_print_elapsed ( self . last_good_echo_nonce_rcvd )
lprint ( "Take {} {}, last good echo: {}" . format ( red ( self . rloc_str , False ) , i1IiIiiiii11 , oooo ) )
if 65 - 65: Oo0Ooo . OoOoOO00 . OOooOOo % o0oOOo0O0Ooo + OoO0O00
rloc . state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc . last_state_change = lisp_get_timestamp ( )
return
if ( rloc . no_echoed_nonce_state ( ) == False ) : return
if 20 - 20: OoO0O00
if ( self . recently_requested ( ) == False ) :
o00OooooOOOO = bold ( "up" , False )
lprint ( "Bring {} {}, retry request-nonce mode" . format ( red ( self . rloc_str , False ) , o00OooooOOOO ) )
if 89 - 89: O0 + IiII * I1Ii111
rloc . state = LISP_RLOC_UP_STATE
rloc . last_state_change = lisp_get_timestamp ( )
def print_echo_nonce ( self ) :
ooo0OOoo = lisp_print_elapsed ( self . last_request_nonce_sent )
oO0o00O = lisp_print_elapsed ( self . last_good_echo_nonce_rcvd )
if 7 - 7: Oo0Ooo * OoO0O00 - II111iiii % I1Ii111 . Oo0Ooo . Oo0Ooo
iiII1iIi1ii1i = lisp_print_elapsed ( self . last_echo_nonce_sent )
i11IiI1 = lisp_print_elapsed ( self . last_request_nonce_rcvd )
o00oOOO = space ( 4 )
if 62 - 62: ooOoO0o * I1ii11iIi11i / iII111i * OOooOOo / OOooOOo - iII111i
I1i = "Nonce-Echoing:\n"
I1i += ( "{}Last request-nonce sent: {}\n{}Last echo-nonce " + "received: {}\n" ) . format ( o00oOOO , ooo0OOoo , o00oOOO , oO0o00O )
if 59 - 59: Ii1I % iII111i / II111iiii + I1IiiI * ooOoO0o
I1i += ( "{}Last request-nonce received: {}\n{}Last echo-nonce " + "sent: {}" ) . format ( o00oOOO , i11IiI1 , o00oOOO , iiII1iIi1ii1i )
return ( I1i )
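# lisp_keys: key material for one lisp-crypto key-id: a curve25519 ECDH (or
# 1024-bit DH) key-pair, the negotiated cipher suite (AES-CBC, AES-GCM or
# chacha20), and the derived encryption and ICV keys.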
class lisp_keys ( ) :
def __init__ ( self , key_id , do_curve = True , do_chacha = use_chacha ,
do_poly = use_poly ) :
self . uptime = lisp_get_timestamp ( )
self . last_rekey = None
self . rekey_count = 0
self . use_count = 0
self . key_id = key_id
self . cipher_suite = LISP_CS_1024
self . dh_g_value = LISP_CS_1024_G
self . dh_p_value = LISP_CS_1024_P
self . curve25519 = None
self . cipher_suite_string = ""
if ( do_curve ) :
if ( do_chacha ) :
self . cipher_suite = LISP_CS_25519_CHACHA
self . cipher_suite_string = "chacha"
elif ( os . getenv ( "LISP_USE_AES_GCM" ) != None ) :
self . cipher_suite = LISP_CS_25519_GCM
self . cipher_suite_string = "aes-gcm"
else :
self . cipher_suite = LISP_CS_25519_CBC
self . cipher_suite_string = "aes-cbc"
if 59 - 59: iII111i - OoooooooOO / ooOoO0o + I1ii11iIi11i . o0oOOo0O0Ooo - iII111i
self . local_private_key = random . randint ( 0 , 2 ** 128 - 1 )
Iiii11 = lisp_hex_string ( self . local_private_key ) . zfill ( 32 )
self . curve25519 = curve25519 . Private ( Iiii11 )
else :
self . local_private_key = random . randint ( 0 , 0x1fff )
if 29 - 29: oO0o
self . local_public_key = self . compute_public_key ( )
self . remote_public_key = None
self . shared_key = None
self . encrypt_key = None
self . icv_key = None
self . icv = poly1305 if do_poly else hashlib . sha256
self . iv = None
self . get_iv ( )
self . do_poly = do_poly
def copy_keypair ( self , key ) :
self . local_private_key = key . local_private_key
self . local_public_key = key . local_public_key
self . curve25519 = key . curve25519
def get_iv ( self ) :
if ( self . iv == None ) :
self . iv = random . randint ( 0 , LISP_16_128_MASK )
else :
self . iv += 1
if 45 - 45: O0
Ii1IiiiI1ii = self . iv
if ( self . cipher_suite == LISP_CS_25519_CHACHA ) :
Ii1IiiiI1ii = struct . pack ( "Q" , Ii1IiiiI1ii & LISP_8_64_MASK )
elif ( self . cipher_suite == LISP_CS_25519_GCM ) :
O0OoO0OO = struct . pack ( "I" , ( Ii1IiiiI1ii >> 64 ) & LISP_4_32_MASK )
oooooo0 = struct . pack ( "Q" , Ii1IiiiI1ii & LISP_8_64_MASK )
Ii1IiiiI1ii = O0OoO0OO + oooooo0
else :
Ii1IiiiI1ii = struct . pack ( "QQ" , Ii1IiiiI1ii >> 64 , Ii1IiiiI1ii & LISP_8_64_MASK )
return ( Ii1IiiiI1ii )
def key_length ( self , key ) :
if ( type ( key ) != str ) : key = self . normalize_pub_key ( key )
return ( len ( key ) / 2 )
def print_key ( self , key ) :
III = self . normalize_pub_key ( key )
return ( "0x{}...{}({})" . format ( III [ 0 : 4 ] , III [ - 4 : : ] , self . key_length ( III ) ) )
def normalize_pub_key ( self , key ) :
if ( type ( key ) == str ) :
if ( self . curve25519 ) : return ( binascii . hexlify ( key ) )
return ( key )
if 51 - 51: ooOoO0o * oO0o - I1Ii111 + iII111i
key = lisp_hex_string ( key ) . zfill ( 256 )
return ( key )
def print_keys ( self , do_bold = True ) :
II1Ooo0000o00OO = bold ( "local-key: " , False ) if do_bold else "local-key: "
if ( self . local_public_key == None ) :
II1Ooo0000o00OO += "none"
else :
II1Ooo0000o00OO += self . print_key ( self . local_public_key )
if 26 - 26: i11iIiiIii - ooOoO0o
Oo0O = bold ( "remote-key: " , False ) if do_bold else "remote-key: "
if ( self . remote_public_key == None ) :
Oo0O += "none"
else :
Oo0O += self . print_key ( self . remote_public_key )
if 45 - 45: ooOoO0o + II111iiii % iII111i
o00OoOo0 = "ECDH" if ( self . curve25519 ) else "DH"
Iii1I = self . cipher_suite
return ( "{} cipher-suite: {}, {}, {}" . format ( o00OoOo0 , Iii1I , II1Ooo0000o00OO , Oo0O ) )
def compare_keys ( self , keys ) :
if ( self . dh_g_value != keys . dh_g_value ) : return ( False )
if ( self . dh_p_value != keys . dh_p_value ) : return ( False )
if ( self . remote_public_key != keys . remote_public_key ) : return ( False )
return ( True )
def compute_public_key ( self ) :
if ( self . curve25519 ) : return ( self . curve25519 . get_public ( ) . public )
if 78 - 78: ooOoO0o * OoOoOO00 . Ii1I . OoOoOO00 % iIii1I11I1II1
Iiii11 = self . local_private_key
o0 = self . dh_g_value
i111 = self . dh_p_value
return ( int ( ( o0 ** Iiii11 ) % i111 ) )
def compute_shared_key ( self , ed , print_shared = False ) :
Iiii11 = self . local_private_key
o0O0OOooO = self . remote_public_key
if 1 - 1: I1Ii111 * OoO0O00 - iII111i
O0OoO0 = bold ( "Compute {} shared-key" . format ( ed ) , False )
lprint ( "{}, key-material: {}" . format ( O0OoO0 , self . print_keys ( ) ) )
if 73 - 73: i11iIiiIii - I1IiiI * I1IiiI
if ( self . curve25519 ) :
ooo0ooOoOOoO = curve25519 . Public ( o0O0OOooO )
self . shared_key = self . curve25519 . get_shared_key ( ooo0ooOoOOoO )
else :
i111 = self . dh_p_value
self . shared_key = ( o0O0OOooO ** Iiii11 ) % i111
if ( print_shared ) :
III = self . print_key ( self . shared_key )
lprint ( "Computed shared-key: {}" . format ( III ) )
self . compute_encrypt_icv_keys ( )
self . rekey_count += 1
self . last_rekey = lisp_get_timestamp ( )
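# compute_encrypt_icv_keys(): derive the session keys from the DH/ECDH shared
# secret with an HMAC-SHA256 keyed by a fixed context string that includes
# the XOR of both public keys; the top 128 bits become the encryption key and
# the low bits the ICV key (poly1305 or HMAC-SHA256, depending on do_poly).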
def compute_encrypt_icv_keys ( self ) :
iiI11II1I = hashlib . sha256
if ( self . curve25519 ) :
i11 = self . shared_key
else :
i11 = lisp_hex_string ( self . shared_key )
II1Ooo0000o00OO = self . local_public_key
if ( type ( II1Ooo0000o00OO ) != long ) : II1Ooo0000o00OO = int ( binascii . hexlify ( II1Ooo0000o00OO ) , 16 )
Oo0O = self . remote_public_key
if ( type ( Oo0O ) != long ) : Oo0O = int ( binascii . hexlify ( Oo0O ) , 16 )
iIiIi1i1ii11 = "0001" + "lisp-crypto" + lisp_hex_string ( II1Ooo0000o00OO ^ Oo0O ) + "0100"
if 86 - 86: I1Ii111 * ooOoO0o - ooOoO0o . I1IiiI
Ooooo0o0 = hmac . new ( iIiIi1i1ii11 , i11 , iiI11II1I ) . hexdigest ( )
Ooooo0o0 = int ( Ooooo0o0 , 16 )
IiiI11iIi = ( Ooooo0o0 >> 128 ) & LISP_16_128_MASK
I1I111iIiI = Ooooo0o0 & LISP_16_128_MASK
self . encrypt_key = lisp_hex_string ( IiiI11iIi ) . zfill ( 32 )
I1Ii11I1i1iii = 32 if self . do_poly else 40
self . icv_key = lisp_hex_string ( I1I111iIiI ) . zfill ( I1Ii11I1i1iii )
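# do_icv(): compute the integrity-check value over the packet, using
# poly1305-AES when do_poly is set and an HMAC-SHA256 truncated to 160 bits
# otherwise; returned as a hex string.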
def do_icv ( self , packet , nonce ) :
if ( self . icv_key == None ) : return ( "" )
if ( self . do_poly ) :
o00O00oOooo = self . icv . poly1305aes
oooii111I1I1I = self . icv . binascii . hexlify
nonce = oooii111I1I1I ( nonce )
iIIiIi1IiI1 = o00O00oOooo ( self . encrypt_key , self . icv_key , nonce , packet )
iIIiIi1IiI1 = oooii111I1I1I ( iIIiIi1IiI1 )
else :
Iiii11 = binascii . unhexlify ( self . icv_key )
iIIiIi1IiI1 = hmac . new ( Iiii11 , packet , self . icv ) . hexdigest ( )
iIIiIi1IiI1 = iIIiIi1IiI1 [ 0 : 40 ]
if 80 - 80: II111iiii / OoOoOO00 % I1ii11iIi11i . iIii1I11I1II1 % I11i . o0oOOo0O0Ooo
return ( iIIiIi1IiI1 )
def add_key_by_nonce ( self , nonce ) :
if ( lisp_crypto_keys_by_nonce . has_key ( nonce ) == False ) :
lisp_crypto_keys_by_nonce [ nonce ] = [ None , None , None , None ]
if 8 - 8: o0oOOo0O0Ooo . II111iiii . iII111i - i11iIiiIii
lisp_crypto_keys_by_nonce [ nonce ] [ self . key_id ] = self
def delete_key_by_nonce ( self , nonce ) :
if ( lisp_crypto_keys_by_nonce . has_key ( nonce ) == False ) : return
lisp_crypto_keys_by_nonce . pop ( nonce )
def add_key_by_rloc ( self , addr_str , encap ) :
o0O00o00Ooo = lisp_crypto_keys_by_rloc_encap if encap else lisp_crypto_keys_by_rloc_decap
if ( o0O00o00Ooo . has_key ( addr_str ) == False ) :
o0O00o00Ooo [ addr_str ] = [ None , None , None , None ]
if 5 - 5: I1ii11iIi11i . II111iiii . i1IIi
o0O00o00Ooo [ addr_str ] [ self . key_id ] = self
if ( encap == False ) :
lisp_write_ipc_decap_key ( addr_str , o0O00o00Ooo [ addr_str ] )
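# encode_lcaf(): encode the local public key (optionally followed by an RLOC
# address) as a Security-Type LCAF, which is how key material is carried in
# LISP control messages.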
def encode_lcaf ( self , rloc_addr ) :
I1I1 = self . normalize_pub_key ( self . local_public_key )
oOoooo0OooO = self . key_length ( I1I1 )
OooO0O = ( 6 + oOoooo0OooO + 2 )
if ( rloc_addr != None ) : OooO0O += rloc_addr . addr_length ( )
if 73 - 73: I1IiiI / O0 % iII111i * II111iiii
oOo = struct . pack ( "HBBBBHBB" , socket . htons ( LISP_AFI_LCAF ) , 0 , 0 ,
LISP_LCAF_SECURITY_TYPE , 0 , socket . htons ( OooO0O ) , 1 , 0 )
Iii1I = self . cipher_suite
oOo += struct . pack ( "BBH" , Iii1I , 0 , socket . htons ( oOoooo0OooO ) )
for II11iIII1i1I in range ( 0 , oOoooo0OooO * 2 , 16 ) :
Iiii11 = int ( I1I1 [ II11iIII1i1I : II11iIII1i1I + 16 ] , 16 )
oOo += struct . pack ( "Q" , byte_swap_64 ( Iiii11 ) )
if 41 - 41: IiII * OoooooooOO . ooOoO0o % i11iIiiIii
if 11 - 11: iIii1I11I1II1 . I1Ii111 - Oo0Ooo / I11i + II111iiii
if 29 - 29: I11i . i11iIiiIii + i1IIi - Ii1I + O0 . I1IiiI
if 8 - 8: o0oOOo0O0Ooo
if 78 - 78: i1IIi - Oo0Ooo
if ( rloc_addr ) :
oOo += struct . pack ( "H" , socket . htons ( rloc_addr . afi ) )
oOo += rloc_addr . pack_address ( )
if 48 - 48: Ii1I - OoooooooOO + I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 . I1IiiI
return ( oOo )
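# encode_lcaf() above builds an LCAF Security Type record: a fixed LCAF header
# (AFI = LCAF, type = security, total length), one key entry consisting of a
# cipher-suite byte, a reserved byte and the key length, the local public key
# packed as byte-swapped 64-bit quads, and finally the RLOC AFI plus address
# when one is supplied. decode_lcaf() below walks the same layout in reverse.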
if 42 - 42: I1Ii111
if 70 - 70: o0oOOo0O0Ooo / I11i + oO0o % I1IiiI % Oo0Ooo + OoO0O00
def decode_lcaf ( self , packet , lcaf_len ) :
if 80 - 80: OOooOOo
if 12 - 12: Ii1I
if 2 - 2: OoooooooOO
if 100 - 100: Oo0Ooo / O0 * i11iIiiIii * OoooooooOO
if ( lcaf_len == 0 ) :
IIiI1I11ii1i = "HHBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 46 - 46: O0 % OoooooooOO
o0o0O00oOo , I1IiII , o0O00o0o , I1IiII , lcaf_len = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 31 - 31: ooOoO0o % I1IiiI % IiII / I1Ii111
if 74 - 74: i1IIi + oO0o - iIii1I11I1II1 . Oo0Ooo
if ( o0O00o0o != LISP_LCAF_SECURITY_TYPE ) :
packet = packet [ lcaf_len + 6 : : ]
return ( packet )
if 70 - 70: iII111i
lcaf_len = socket . ntohs ( lcaf_len )
packet = packet [ i1II1i1iiI1 : : ]
if 51 - 51: O0 - I1ii11iIi11i / I11i * II111iiii + OoO0O00 % I1ii11iIi11i
if 58 - 58: oO0o + IiII % iII111i - Ii1I - OOooOOo % Ii1I
if 86 - 86: o0oOOo0O0Ooo
if 15 - 15: oO0o - iIii1I11I1II1 - II111iiii - IiII % I1ii11iIi11i
if 80 - 80: IiII * iII111i . i1IIi % Ii1I % I1ii11iIi11i + ooOoO0o
if 6 - 6: I1ii11iIi11i . oO0o . OoO0O00 + IiII
o0O00o0o = LISP_LCAF_SECURITY_TYPE
IIiI1I11ii1i = "BBBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 65 - 65: I1ii11iIi11i / ooOoO0o
II1I1 , I1IiII , Iii1I , I1IiII , oOoooo0OooO = struct . unpack ( IIiI1I11ii1i ,
packet [ : i1II1i1iiI1 ] )
if 53 - 53: i11iIiiIii - II111iiii % I1IiiI . OoO0O00
if 67 - 67: I1ii11iIi11i * i11iIiiIii + Ii1I % Ii1I + iIii1I11I1II1 - OOooOOo
if 10 - 10: I1IiiI - I1Ii111 - I1ii11iIi11i / iII111i
if 10 - 10: Ii1I * I1IiiI % I1Ii111 + iII111i . Ii1I
if 40 - 40: I1ii11iIi11i
if 78 - 78: IiII / iII111i * Ii1I . OOooOOo . oO0o - I1Ii111
packet = packet [ i1II1i1iiI1 : : ]
oOoooo0OooO = socket . ntohs ( oOoooo0OooO )
if ( len ( packet ) < oOoooo0OooO ) : return ( None )
if 39 - 39: ooOoO0o . i1IIi + OoooooooOO . iII111i - i11iIiiIii % I1Ii111
if 38 - 38: oO0o
if 9 - 9: I11i . OoO0O00 . oO0o / OoooooooOO
if 59 - 59: iIii1I11I1II1 + i1IIi % II111iiii
iii1IiI = [ LISP_CS_25519_CBC , LISP_CS_25519_GCM , LISP_CS_25519_CHACHA ,
LISP_CS_1024 ]
if ( Iii1I not in iii1IiI ) :
lprint ( "Cipher-suites {} supported, received {}" . format ( iii1IiI ,
Iii1I ) )
packet = packet [ oOoooo0OooO : : ]
return ( packet )
if 87 - 87: I1IiiI - O0 - I11i * I1Ii111 % I1Ii111
if 99 - 99: O0 * i11iIiiIii % OOooOOo * II111iiii
self . cipher_suite = Iii1I
if 98 - 98: O0 + iIii1I11I1II1
if 94 - 94: i1IIi * OoO0O00 * OoOoOO00
if 93 - 93: ooOoO0o / OOooOOo * O0
if 17 - 17: OoO0O00 / ooOoO0o % I1IiiI
if 47 - 47: Oo0Ooo * OoO0O00 / o0oOOo0O0Ooo * I1IiiI
I1I1 = 0
for II11iIII1i1I in range ( 0 , oOoooo0OooO , 8 ) :
Iiii11 = byte_swap_64 ( struct . unpack ( "Q" , packet [ II11iIII1i1I : II11iIII1i1I + 8 ] ) [ 0 ] )
I1I1 <<= 64
I1I1 |= Iiii11
if 60 - 60: I1ii11iIi11i / IiII . i11iIiiIii / OoO0O00 % II111iiii
self . remote_public_key = I1I1
if 6 - 6: iII111i % o0oOOo0O0Ooo + I1Ii111
if 91 - 91: o0oOOo0O0Ooo + O0 * oO0o * IiII * I1ii11iIi11i
if 83 - 83: OoooooooOO
if 52 - 52: o0oOOo0O0Ooo / OoOoOO00 % oO0o % OoO0O00 / IiII % o0oOOo0O0Ooo
if 88 - 88: OOooOOo / i11iIiiIii / Ii1I / i11iIiiIii * I1ii11iIi11i % I11i
if ( self . curve25519 ) :
Iiii11 = lisp_hex_string ( self . remote_public_key )
Iiii11 = Iiii11 . zfill ( 64 )
II1I1iI1i1IiI = ""
for II11iIII1i1I in range ( 0 , len ( Iiii11 ) , 2 ) :
II1I1iI1i1IiI += chr ( int ( Iiii11 [ II11iIII1i1I : II11iIII1i1I + 2 ] , 16 ) )
if 9 - 9: oO0o / OoooooooOO / OOooOOo * i11iIiiIii - ooOoO0o + I1Ii111
self . remote_public_key = II1I1iI1i1IiI
if 69 - 69: O0 . I1Ii111 - O0
if 58 - 58: OoOoOO00 + I1ii11iIi11i
packet = packet [ oOoooo0OooO : : ]
return ( packet )
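#
# A minimal sketch of the key-material handling in decode_lcaf() above: the
# remote public key arrives as key-length bytes, is consumed 8 bytes at a
# time, each quad is byte-swapped into an ever-growing integer, and for the
# curve25519 suites the integer is then rendered as a raw 32-byte string.
# The helper below is hypothetical (it is not referenced elsewhere in this
# module) and uses only struct/binascii from the standard library.
#
def _sketch_unpack_public_key(key_material, key_len, curve25519=True):
    import struct, binascii
    def byte_swap(value):
        # same byte-reversal convention as byte_swap_64() in this file
        return struct.unpack("<Q", struct.pack(">Q", value))[0]
    pubkey = 0
    for i in range(0, key_len, 8):
        quad = struct.unpack("Q", key_material[i:i + 8])[0]
        pubkey = (pubkey << 64) | byte_swap(quad)
    if (curve25519):
        # curve25519 expects the key as raw 32 bytes rather than an integer
        return (binascii.unhexlify("{:064x}".format(pubkey)))
    return (pubkey)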
class lisp_thread ( ) :
def __init__ ( self , name ) :
self . thread_name = name
self . thread_number = - 1
self . number_of_pcap_threads = 0
self . number_of_worker_threads = 0
self . input_queue = Queue . Queue ( )
self . input_stats = lisp_stats ( )
self . lisp_packet = lisp_packet ( None )
class lisp_control_header ( ) :
def __init__ ( self ) :
self . type = 0
self . record_count = 0
self . nonce = 0
self . rloc_probe = False
self . smr_bit = False
self . smr_invoked_bit = False
self . ddt_bit = False
self . to_etr = False
self . to_ms = False
self . info_reply = False
if 85 - 85: I1IiiI * iIii1I11I1II1 . iII111i / iII111i
if 43 - 43: I1IiiI
def decode ( self , packet ) :
IIiI1I11ii1i = "BBBBQ"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( False )
if 78 - 78: OoO0O00 % II111iiii + OoOoOO00 / I1IiiI
IIII11i1Ii , I11Iii11i1Ii , oo00000ooOooO , self . record_count , self . nonce = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 56 - 56: I1IiiI . IiII
if 53 - 53: ooOoO0o - OoOoOO00 + IiII
self . type = IIII11i1Ii >> 4
if ( self . type == LISP_MAP_REQUEST ) :
self . smr_bit = True if ( IIII11i1Ii & 0x01 ) else False
self . rloc_probe = True if ( IIII11i1Ii & 0x02 ) else False
self . smr_invoked_bit = True if ( I11Iii11i1Ii & 0x40 ) else False
if 100 - 100: oO0o + OoO0O00
if ( self . type == LISP_ECM ) :
self . ddt_bit = True if ( IIII11i1Ii & 0x04 ) else False
self . to_etr = True if ( IIII11i1Ii & 0x02 ) else False
self . to_ms = True if ( IIII11i1Ii & 0x01 ) else False
if 95 - 95: i11iIiiIii . o0oOOo0O0Ooo + OoooooooOO % Oo0Ooo
if ( self . type == LISP_NAT_INFO ) :
self . info_reply = True if ( IIII11i1Ii & 0x08 ) else False
if 21 - 21: iII111i - o0oOOo0O0Ooo / I11i % O0 / iIii1I11I1II1 / iII111i
return ( True )
if 1 - 1: Oo0Ooo . i11iIiiIii
if 9 - 9: OoooooooOO / I11i
def is_info_request ( self ) :
return ( ( self . type == LISP_NAT_INFO and self . is_info_reply ( ) == False ) )
if 47 - 47: OoooooooOO
if 48 - 48: OoOoOO00 . IiII % I1IiiI + I11i
def is_info_reply ( self ) :
return ( True if self . info_reply else False )
if 37 - 37: Oo0Ooo + I1Ii111 * oO0o / o0oOOo0O0Ooo
if 78 - 78: IiII + I11i - o0oOOo0O0Ooo + OoO0O00 / iIii1I11I1II1
def is_rloc_probe ( self ) :
return ( True if self . rloc_probe else False )
if 47 - 47: OOooOOo
if 20 - 20: I1Ii111 % ooOoO0o - I1Ii111 * OoooooooOO / I1ii11iIi11i
def is_smr ( self ) :
return ( True if self . smr_bit else False )
if 57 - 57: IiII % I11i * OOooOOo % I1ii11iIi11i
if 65 - 65: i1IIi - OoooooooOO
def is_smr_invoked ( self ) :
return ( True if self . smr_invoked_bit else False )
if 66 - 66: I1ii11iIi11i / i1IIi * I1IiiI - OoOoOO00 + oO0o
if 74 - 74: iII111i / I1Ii111 / II111iiii - iII111i / oO0o % I11i
def is_ddt ( self ) :
return ( True if self . ddt_bit else False )
if 19 - 19: IiII % OoooooooOO + OoooooooOO
if 7 - 7: i1IIi
def is_to_etr ( self ) :
return ( True if self . to_etr else False )
if 91 - 91: OoOoOO00 - OoOoOO00 . IiII
if 33 - 33: I1Ii111 - iIii1I11I1II1 / Ii1I % O0
def is_to_ms ( self ) :
return ( True if self . to_ms else False )
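#
# A minimal sketch of what lisp_control_header.decode() above extracts from
# the first 12 bytes of every control message: the message type in the high
# nibble of the first byte, type-specific flag bits in the rest of the first
# two bytes, the record count in the fourth byte and the 64-bit nonce. The
# helper name is hypothetical; it mirrors the "BBBBQ" layout used above.
#
def _sketch_decode_control_header(packet):
    import struct
    fmt = "BBBBQ"
    size = struct.calcsize(fmt)
    if (len(packet) < size): return (None)
    first, second, reserved, record_count, nonce = struct.unpack(fmt,
        packet[:size])
    return ({
        "type": first >> 4,                # Map-Request, Map-Register, ECM, ...
        "first-byte-flags": first & 0x0f,  # e.g. S/P/R bits for a Map-Request
        "second-byte-flags": second,       # e.g. the smr-invoked bit
        "record-count": record_count,
        "nonce": nonce,
    })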
class lisp_map_register ( ) :
def __init__ ( self ) :
self . proxy_reply_requested = False
self . lisp_sec_present = False
self . xtr_id_present = False
self . map_notify_requested = False
self . mobile_node = False
self . merge_register_requested = False
self . use_ttl_for_timeout = False
self . map_register_refresh = False
self . record_count = 0
self . nonce = 0
self . alg_id = 0
self . key_id = 0
self . auth_len = 0
self . auth_data = 0
self . xtr_id = 0
self . site_id = 0
self . record_count = 0
self . sport = 0
self . encrypt_bit = 0
self . encryption_key_id = None
if 58 - 58: Ii1I * iIii1I11I1II1 + ooOoO0o . ooOoO0o
if 74 - 74: ooOoO0o - o0oOOo0O0Ooo * IiII % ooOoO0o
def print_map_register ( self ) :
Oo0O0 = lisp_hex_string ( self . xtr_id )
if 36 - 36: I1ii11iIi11i * o0oOOo0O0Ooo + i11iIiiIii + OoooooooOO
IIIIIiI11Ii = ( "{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}" )
if 82 - 82: OoOoOO00 . OoOoOO00
lprint ( IIIIIiI11Ii . format ( bold ( "Map-Register" , False ) , "P" if self . proxy_reply_requested else "p" ,
"S" if self . lisp_sec_present else "s" ,
"I" if self . xtr_id_present else "i" ,
"T" if self . use_ttl_for_timeout else "t" ,
"R" if self . merge_register_requested else "r" ,
"M" if self . mobile_node else "m" ,
"N" if self . map_notify_requested else "n" ,
"F" if self . map_register_refresh else "f" ,
"E" if self . encrypt_bit else "e" ,
self . record_count , lisp_hex_string ( self . nonce ) , self . key_id ,
self . alg_id ,
" (sha1)" if ( self . key_id == LISP_SHA_1_96_ALG_ID ) else
( " (sha2)" if ( self . key_id == LISP_SHA_256_128_ALG_ID ) else "" ) ,
self . auth_len , Oo0O0 , self . site_id ) )
if 85 - 85: I1ii11iIi11i . oO0o . O0
if 16 - 16: I1ii11iIi11i % I1ii11iIi11i % I1Ii111 + I11i . I1Ii111 + OOooOOo
if 85 - 85: i11iIiiIii . I11i + Ii1I / Ii1I
if 43 - 43: IiII . OoooooooOO - II111iiii
def encode ( self ) :
O0oooOO = ( LISP_MAP_REGISTER << 28 ) | self . record_count
if ( self . proxy_reply_requested ) : O0oooOO |= 0x08000000
if ( self . lisp_sec_present ) : O0oooOO |= 0x04000000
if ( self . xtr_id_present ) : O0oooOO |= 0x02000000
if ( self . map_register_refresh ) : O0oooOO |= 0x1000
if ( self . use_ttl_for_timeout ) : O0oooOO |= 0x800
if ( self . merge_register_requested ) : O0oooOO |= 0x400
if ( self . mobile_node ) : O0oooOO |= 0x200
if ( self . map_notify_requested ) : O0oooOO |= 0x100
if ( self . encryption_key_id != None ) :
O0oooOO |= 0x2000
O0oooOO |= self . encryption_key_id << 14
if 90 - 90: I1IiiI - iIii1I11I1II1 + I1ii11iIi11i * OOooOOo * oO0o
if 19 - 19: I1Ii111 * II111iiii % Oo0Ooo - i1IIi
if 27 - 27: OoOoOO00 . O0 / I1ii11iIi11i . iIii1I11I1II1
if 15 - 15: Ii1I + OoO0O00 % iIii1I11I1II1 - I1ii11iIi11i - i1IIi % o0oOOo0O0Ooo
if 54 - 54: IiII - II111iiii . ooOoO0o + Ii1I
if ( self . alg_id == LISP_NONE_ALG_ID ) :
self . auth_len = 0
else :
if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
self . auth_len = LISP_SHA1_160_AUTH_DATA_LEN
if 45 - 45: oO0o + II111iiii . iII111i / I1ii11iIi11i
if ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
self . auth_len = LISP_SHA2_256_AUTH_DATA_LEN
if 76 - 76: Ii1I + iII111i - IiII * iIii1I11I1II1 % i1IIi
if 72 - 72: ooOoO0o + II111iiii . O0 - iII111i / OoooooooOO . I1Ii111
if 28 - 28: iIii1I11I1II1 . O0
oOo = struct . pack ( "I" , socket . htonl ( O0oooOO ) )
oOo += struct . pack ( "QBBH" , self . nonce , self . key_id , self . alg_id ,
socket . htons ( self . auth_len ) )
if 32 - 32: OoooooooOO
oOo = self . zero_auth ( oOo )
return ( oOo )
if 29 - 29: I1ii11iIi11i
if 41 - 41: Ii1I
def zero_auth ( self , packet ) :
ii = struct . calcsize ( "I" ) + struct . calcsize ( "QHH" )
I1iiI1II11 = ""
ooooO000 = 0
if ( self . alg_id == LISP_NONE_ALG_ID ) : return ( packet )
if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
I1iiI1II11 = struct . pack ( "QQI" , 0 , 0 , 0 )
ooooO000 = struct . calcsize ( "QQI" )
if 63 - 63: I1Ii111 - oO0o - iII111i - ooOoO0o / oO0o + OoO0O00
if ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
I1iiI1II11 = struct . pack ( "QQQQ" , 0 , 0 , 0 , 0 )
ooooO000 = struct . calcsize ( "QQQQ" )
if 94 - 94: IiII / I1IiiI . II111iiii
packet = packet [ 0 : ii ] + I1iiI1II11 + packet [ ii + ooooO000 : : ]
return ( packet )
if 32 - 32: oO0o . OOooOOo % OOooOOo . OoOoOO00
if 37 - 37: OOooOOo + O0 + OOooOOo . iII111i . o0oOOo0O0Ooo
def encode_auth ( self , packet ) :
ii = struct . calcsize ( "I" ) + struct . calcsize ( "QHH" )
ooooO000 = self . auth_len
I1iiI1II11 = self . auth_data
packet = packet [ 0 : ii ] + I1iiI1II11 + packet [ ii + ooooO000 : : ]
return ( packet )
if 78 - 78: I1IiiI / I11i + o0oOOo0O0Ooo . Oo0Ooo / O0
if 49 - 49: I1ii11iIi11i
def decode ( self , packet ) :
oOO = packet
IIiI1I11ii1i = "I"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( [ None , None ] )
if 18 - 18: Oo0Ooo + IiII
O0oooOO = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
O0oooOO = socket . ntohl ( O0oooOO [ 0 ] )
packet = packet [ i1II1i1iiI1 : : ]
if 79 - 79: OoO0O00 - O0 + II111iiii % Ii1I . I1IiiI
IIiI1I11ii1i = "QBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( [ None , None ] )
if 43 - 43: I1IiiI % I1ii11iIi11i * Ii1I
self . nonce , self . key_id , self . alg_id , self . auth_len = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 31 - 31: Ii1I / iII111i
if 3 - 3: IiII
self . auth_len = socket . ntohs ( self . auth_len )
self . proxy_reply_requested = True if ( O0oooOO & 0x08000000 ) else False
if 37 - 37: Ii1I * OoooooooOO * I11i + Oo0Ooo . I1IiiI
self . lisp_sec_present = True if ( O0oooOO & 0x04000000 ) else False
self . xtr_id_present = True if ( O0oooOO & 0x02000000 ) else False
self . use_ttl_for_timeout = True if ( O0oooOO & 0x800 ) else False
self . map_register_refresh = True if ( O0oooOO & 0x1000 ) else False
self . merge_register_requested = True if ( O0oooOO & 0x400 ) else False
self . mobile_node = True if ( O0oooOO & 0x200 ) else False
self . map_notify_requested = True if ( O0oooOO & 0x100 ) else False
self . record_count = O0oooOO & 0xff
if 61 - 61: OOooOOo . OOooOOo
if 17 - 17: II111iiii / ooOoO0o
if 80 - 80: OOooOOo * OoO0O00 + Ii1I
if 62 - 62: OoooooooOO . O0 % Oo0Ooo
self . encrypt_bit = True if O0oooOO & 0x2000 else False
if ( self . encrypt_bit ) :
self . encryption_key_id = ( O0oooOO >> 14 ) & 0x7
if 98 - 98: o0oOOo0O0Ooo * Oo0Ooo - Ii1I . ooOoO0o
if 2 - 2: Oo0Ooo - ooOoO0o % iIii1I11I1II1
if 88 - 88: I1Ii111 - OoO0O00
if 79 - 79: iII111i
if 45 - 45: II111iiii + iII111i . I11i . O0 * i1IIi - Ii1I
if ( self . xtr_id_present ) :
if ( self . decode_xtr_id ( oOO ) == False ) : return ( [ None , None ] )
if 48 - 48: I1ii11iIi11i + Oo0Ooo
if 76 - 76: I1ii11iIi11i
packet = packet [ i1II1i1iiI1 : : ]
if 98 - 98: II111iiii + I1IiiI - I1ii11iIi11i . Ii1I
if 51 - 51: Ii1I + i11iIiiIii * OoO0O00 % Oo0Ooo / I1IiiI - iIii1I11I1II1
if 20 - 20: I1Ii111 . I11i . Ii1I + I11i - OOooOOo * oO0o
if 82 - 82: OoO0O00
if ( self . auth_len != 0 ) :
if ( len ( packet ) < self . auth_len ) : return ( [ None , None ] )
if 78 - 78: II111iiii / I11i - i11iIiiIii + I1ii11iIi11i * Oo0Ooo
if ( self . alg_id not in ( LISP_NONE_ALG_ID , LISP_SHA_1_96_ALG_ID ,
LISP_SHA_256_128_ALG_ID ) ) :
lprint ( "Invalid authentication alg-id: {}" . format ( self . alg_id ) )
return ( [ None , None ] )
if 17 - 17: OoOoOO00
if 72 - 72: iII111i . Oo0Ooo - i11iIiiIii / I1IiiI
ooooO000 = self . auth_len
if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
i1II1i1iiI1 = struct . calcsize ( "QQI" )
if ( ooooO000 < i1II1i1iiI1 ) :
lprint ( "Invalid sha1-96 authentication length" )
return ( [ None , None ] )
if 64 - 64: oO0o
oOoOo00o00 , O0OOo00O , i1iI1iIIiIi1I = struct . unpack ( "QQI" , packet [ : ooooO000 ] )
I11iIiIII11 = ""
elif ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
i1II1i1iiI1 = struct . calcsize ( "QQQQ" )
if ( ooooO000 < i1II1i1iiI1 ) :
lprint ( "Invalid sha2-256 authentication length" )
return ( [ None , None ] )
if 81 - 81: I1ii11iIi11i + OoooooooOO - OOooOOo * O0
oOoOo00o00 , O0OOo00O , i1iI1iIIiIi1I , I11iIiIII11 = struct . unpack ( "QQQQ" ,
packet [ : ooooO000 ] )
else :
lprint ( "Unsupported authentication alg-id value {}" . format ( self . alg_id ) )
if 100 - 100: iIii1I11I1II1 - OoOoOO00
return ( [ None , None ] )
if 28 - 28: Oo0Ooo . O0 . I11i
self . auth_data = lisp_concat_auth_data ( self . alg_id , oOoOo00o00 , O0OOo00O ,
i1iI1iIIiIi1I , I11iIiIII11 )
oOO = self . zero_auth ( oOO )
packet = packet [ self . auth_len : : ]
if 60 - 60: II111iiii + I1Ii111 / oO0o % OoooooooOO - i1IIi
return ( [ oOO , packet ] )
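# decode() above hands back a two-element list: the original message with its
# auth-data field zeroed (so the caller can recompute the keyed hash over
# exactly the bytes the sender hashed) and the packet positioned past the
# fixed header. The received auth-data itself is unpacked as three words
# (SHA-1) or four 64-bit words (SHA-256), reassembled with
# lisp_concat_auth_data() and kept in self.auth_data for comparison.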
if 57 - 57: ooOoO0o
if 99 - 99: Oo0Ooo + I1Ii111 % ooOoO0o - o0oOOo0O0Ooo
def encode_xtr_id ( self , packet ) :
o0oo0oo0 = self . xtr_id >> 64
IIi1II = self . xtr_id & 0xffffffffffffffff
o0oo0oo0 = byte_swap_64 ( o0oo0oo0 )
IIi1II = byte_swap_64 ( IIi1II )
OOOoooO0o0o = byte_swap_64 ( self . site_id )
packet += struct . pack ( "QQQ" , o0oo0oo0 , IIi1II , OOOoooO0o0o )
return ( packet )
if 56 - 56: oO0o - o0oOOo0O0Ooo . OoOoOO00 . Ii1I + oO0o * OoooooooOO
if 31 - 31: iII111i - i11iIiiIii % Ii1I / iII111i . OoooooooOO + Oo0Ooo
def decode_xtr_id ( self , packet ) :
i1II1i1iiI1 = struct . calcsize ( "QQQ" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( [ None , None ] )
packet = packet [ len ( packet ) - i1II1i1iiI1 : : ]
o0oo0oo0 , IIi1II , OOOoooO0o0o = struct . unpack ( "QQQ" ,
packet [ : i1II1i1iiI1 ] )
o0oo0oo0 = byte_swap_64 ( o0oo0oo0 )
IIi1II = byte_swap_64 ( IIi1II )
self . xtr_id = ( o0oo0oo0 << 64 ) | IIi1II
self . site_id = byte_swap_64 ( OOOoooO0o0o )
return ( True )
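#
# A minimal sketch of the trailing xTR-ID/site-ID block handled by
# encode_xtr_id()/decode_xtr_id() above: the 128-bit xTR-ID is split into two
# 64-bit halves, each half and the 64-bit site-ID are byte-swapped, and the
# three quads are appended to the end of the Map-Register. The helper below
# is hypothetical and self-contained.
#
def _sketch_append_xtr_id(packet, xtr_id, site_id):
    import struct
    def byte_swap(value):
        # same byte-reversal convention as byte_swap_64() in this file
        return struct.unpack("<Q", struct.pack(">Q", value))[0]
    upper = byte_swap(xtr_id >> 64)
    lower = byte_swap(xtr_id & 0xffffffffffffffff)
    return (packet + struct.pack("QQQ", upper, lower, byte_swap(site_id)))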
class lisp_map_notify ( ) :
def __init__ ( self , lisp_sockets ) :
self . etr = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . etr_port = 0
self . retransmit_timer = None
self . lisp_sockets = lisp_sockets
self . retry_count = 0
self . record_count = 0
self . alg_id = LISP_NONE_ALG_ID
self . key_id = 0
self . auth_len = 0
self . auth_data = ""
self . nonce = 0
self . nonce_key = ""
self . packet = None
self . site = ""
self . map_notify_ack = False
self . eid_records = ""
self . eid_list = [ ]
if 62 - 62: Oo0Ooo + OoooooooOO / iII111i
if 60 - 60: Ii1I / OoOoOO00 . I11i % OOooOOo
def print_notify ( self ) :
I1iiI1II11 = binascii . hexlify ( self . auth_data )
if ( self . alg_id == LISP_SHA_1_96_ALG_ID and len ( I1iiI1II11 ) != 40 ) :
I1iiI1II11 = self . auth_data
elif ( self . alg_id == LISP_SHA_256_128_ALG_ID and len ( I1iiI1II11 ) != 64 ) :
I1iiI1II11 = self . auth_data
if 61 - 61: O0 . Ii1I . O0 * i11iIiiIii * II111iiii / I1Ii111
IIIIIiI11Ii = ( "{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}" )
lprint ( IIIIIiI11Ii . format ( bold ( "Map-Notify-Ack" , False ) if self . map_notify_ack else bold ( "Map-Notify" , False ) ,
self . record_count , lisp_hex_string ( self . nonce ) , self . key_id ,
self . alg_id ,
" (sha1)" if ( self . key_id == LISP_SHA_1_96_ALG_ID ) else
( " (sha2)" if ( self . key_id == LISP_SHA_256_128_ALG_ID ) else "" ) ,
self . auth_len , I1iiI1II11 ) )
if 70 - 70: OoooooooOO
if 1 - 1: iIii1I11I1II1
if 44 - 44: I1ii11iIi11i % IiII
if 6 - 6: OoO0O00
def zero_auth ( self , packet ) :
if ( self . alg_id == LISP_NONE_ALG_ID ) : return ( packet )
if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
I1iiI1II11 = struct . pack ( "QQI" , 0 , 0 , 0 )
if 82 - 82: iIii1I11I1II1 . I11i / IiII / OOooOOo * II111iiii % oO0o
if ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
I1iiI1II11 = struct . pack ( "QQQQ" , 0 , 0 , 0 , 0 )
if 62 - 62: II111iiii
packet += I1iiI1II11
return ( packet )
if 96 - 96: I11i % OoOoOO00 * I1ii11iIi11i
if 94 - 94: Oo0Ooo - i1IIi . O0 % Oo0Ooo . ooOoO0o
def encode ( self , eid_records , password ) :
if ( self . map_notify_ack ) :
O0oooOO = ( LISP_MAP_NOTIFY_ACK << 28 ) | self . record_count
else :
O0oooOO = ( LISP_MAP_NOTIFY << 28 ) | self . record_count
if 63 - 63: i11iIiiIii % I1ii11iIi11i % I1IiiI . IiII * o0oOOo0O0Ooo + OOooOOo
oOo = struct . pack ( "I" , socket . htonl ( O0oooOO ) )
oOo += struct . pack ( "QBBH" , self . nonce , self . key_id , self . alg_id ,
socket . htons ( self . auth_len ) )
if 77 - 77: o0oOOo0O0Ooo
if ( self . alg_id == LISP_NONE_ALG_ID ) :
self . packet = oOo + eid_records
return ( self . packet )
if 63 - 63: ooOoO0o * oO0o + ooOoO0o * Ii1I + Oo0Ooo / I1ii11iIi11i
if 15 - 15: O0 . I1ii11iIi11i * I1ii11iIi11i
if 65 - 65: I1Ii111 + O0 % o0oOOo0O0Ooo
if 72 - 72: OOooOOo . OoOoOO00 / II111iiii
if 69 - 69: OOooOOo * II111iiii - ooOoO0o - i1IIi + i11iIiiIii
oOo = self . zero_auth ( oOo )
oOo += eid_records
if 50 - 50: OoooooooOO * i1IIi / oO0o
ooo000 = lisp_hash_me ( oOo , self . alg_id , password , False )
if 83 - 83: i1IIi
ii = struct . calcsize ( "I" ) + struct . calcsize ( "QHH" )
ooooO000 = self . auth_len
self . auth_data = ooo000
oOo = oOo [ 0 : ii ] + ooo000 + oOo [ ii + ooooO000 : : ]
self . packet = oOo
return ( oOo )
if 38 - 38: OoooooooOO * iIii1I11I1II1
if 54 - 54: OoooooooOO . I1Ii111
def decode ( self , packet ) :
oOO = packet
IIiI1I11ii1i = "I"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 71 - 71: Ii1I
O0oooOO = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
O0oooOO = socket . ntohl ( O0oooOO [ 0 ] )
self . map_notify_ack = ( ( O0oooOO >> 28 ) == LISP_MAP_NOTIFY_ACK )
self . record_count = O0oooOO & 0xff
packet = packet [ i1II1i1iiI1 : : ]
if 31 - 31: I11i . i11iIiiIii . OoO0O00 * Oo0Ooo % Ii1I . o0oOOo0O0Ooo
IIiI1I11ii1i = "QBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 92 - 92: OoooooooOO / O0 * i1IIi + iIii1I11I1II1
self . nonce , self . key_id , self . alg_id , self . auth_len = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 93 - 93: ooOoO0o % I1Ii111
self . nonce_key = lisp_hex_string ( self . nonce )
self . auth_len = socket . ntohs ( self . auth_len )
packet = packet [ i1II1i1iiI1 : : ]
self . eid_records = packet [ self . auth_len : : ]
if 46 - 46: I1ii11iIi11i * OoOoOO00 * IiII * I1ii11iIi11i . I1ii11iIi11i
if ( self . auth_len == 0 ) : return ( self . eid_records )
if 43 - 43: ooOoO0o . i1IIi
if 68 - 68: IiII % Oo0Ooo . O0 - OoOoOO00 + I1ii11iIi11i . i11iIiiIii
if 45 - 45: I1IiiI
if 17 - 17: OoooooooOO - ooOoO0o + Ii1I . OoooooooOO % Oo0Ooo
if ( len ( packet ) < self . auth_len ) : return ( None )
if 92 - 92: I1Ii111 - OOooOOo % OoO0O00 - o0oOOo0O0Ooo % i1IIi
ooooO000 = self . auth_len
if ( self . alg_id == LISP_SHA_1_96_ALG_ID ) :
oOoOo00o00 , O0OOo00O , i1iI1iIIiIi1I = struct . unpack ( "QQI" , packet [ : ooooO000 ] )
I11iIiIII11 = ""
if 38 - 38: I1ii11iIi11i . I11i / OoOoOO00 % I11i
if ( self . alg_id == LISP_SHA_256_128_ALG_ID ) :
oOoOo00o00 , O0OOo00O , i1iI1iIIiIi1I , I11iIiIII11 = struct . unpack ( "QQQQ" ,
packet [ : ooooO000 ] )
if 10 - 10: O0 . I1IiiI * o0oOOo0O0Ooo / iII111i
self . auth_data = lisp_concat_auth_data ( self . alg_id , oOoOo00o00 , O0OOo00O ,
i1iI1iIIiIi1I , I11iIiIII11 )
if 61 - 61: Oo0Ooo - I1Ii111
i1II1i1iiI1 = struct . calcsize ( "I" ) + struct . calcsize ( "QHH" )
packet = self . zero_auth ( oOO [ : i1II1i1iiI1 ] )
i1II1i1iiI1 += ooooO000
packet += oOO [ i1II1i1iiI1 : : ]
return ( packet )
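#
# A minimal sketch of the authentication step in lisp_map_notify.encode()
# above: the auth-data field is first written as zeros, the keyed hash is
# computed over the whole message (header plus EID-records), and the result
# is spliced back over the zeroed field. lisp_hash_me() is the file's own
# helper; the hmac/hashlib call below is only a stand-in for it.
#
def _sketch_fill_auth_data(packet, auth_offset, auth_len, password):
    import hmac, hashlib
    # packet is assumed to already carry auth_len zero bytes at auth_offset
    digest = hmac.new(password, packet, hashlib.sha1).digest()[:auth_len]
    return (packet[:auth_offset] + digest + packet[auth_offset + auth_len:])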
class lisp_map_request ( ) :
def __init__ ( self ) :
self . auth_bit = False
self . map_data_present = False
self . rloc_probe = False
self . smr_bit = False
self . pitr_bit = False
self . smr_invoked_bit = False
self . mobile_node = False
self . xtr_id_present = False
self . local_xtr = False
self . dont_reply_bit = False
self . itr_rloc_count = 0
self . record_count = 0
self . nonce = 0
self . signature_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . target_group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . itr_rlocs = [ ]
self . keys = None
self . privkey_filename = None
self . map_request_signature = None
self . subscribe_bit = False
self . xtr_id = None
if 23 - 23: ooOoO0o - O0 + i11iIiiIii
if 98 - 98: OoooooooOO
def print_prefix ( self ) :
if ( self . target_group . is_null ( ) ) :
return ( green ( self . target_eid . print_prefix ( ) , False ) )
if 61 - 61: o0oOOo0O0Ooo . IiII . O0 + OoooooooOO + O0
return ( green ( self . target_eid . print_sg ( self . target_group ) , False ) )
if 65 - 65: i1IIi * OOooOOo * OoooooooOO - IiII . iII111i - OoO0O00
if 71 - 71: Ii1I * OoOoOO00
def print_map_request ( self ) :
Oo0O0 = ""
if ( self . xtr_id != None and self . subscribe_bit ) :
Oo0O0 = "subscribe, xtr-id: 0x{}, " . format ( lisp_hex_string ( self . xtr_id ) )
if 33 - 33: i1IIi . i1IIi * OoooooooOO % I1Ii111 * o0oOOo0O0Ooo
if 64 - 64: ooOoO0o / ooOoO0o + I1ii11iIi11i * OOooOOo % OOooOOo
if 87 - 87: OoO0O00 * Oo0Ooo
IIIIIiI11Ii = ( "{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:" )
if 83 - 83: i1IIi * I1Ii111 - IiII / Ii1I
lprint ( IIIIIiI11Ii . format ( bold ( "Map-Request" , False ) , "A" if self . auth_bit else "a" ,
"D" if self . map_data_present else "d" ,
"R" if self . rloc_probe else "r" ,
"S" if self . smr_bit else "s" ,
"P" if self . pitr_bit else "p" ,
"I" if self . smr_invoked_bit else "i" ,
"M" if self . mobile_node else "m" ,
"X" if self . xtr_id_present else "x" ,
"L" if self . local_xtr else "l" ,
"D" if self . dont_reply_bit else "d" , self . itr_rloc_count ,
self . record_count , lisp_hex_string ( self . nonce ) ,
self . source_eid . afi , green ( self . source_eid . print_address ( ) , False ) ,
" (with sig)" if self . map_request_signature != None else "" ,
self . target_eid . afi , green ( self . print_prefix ( ) , False ) , Oo0O0 ) )
if 69 - 69: i1IIi . I1IiiI + IiII
i1iIi = self . keys
for OooOoOOo0 in self . itr_rlocs :
lprint ( " itr-rloc: afi {} {}{}" . format ( OooOoOOo0 . afi ,
red ( OooOoOOo0 . print_address_no_iid ( ) , False ) ,
"" if ( i1iIi == None ) else ", " + i1iIi [ 1 ] . print_keys ( ) ) )
i1iIi = None
if 67 - 67: OoOoOO00 % Oo0Ooo
if 7 - 7: i11iIiiIii % I1ii11iIi11i / I1Ii111 % Oo0Ooo - OoO0O00
if 73 - 73: I1ii11iIi11i
def sign_map_request ( self , privkey ) :
oo0o0Oo = self . signature_eid . print_address ( )
IiIiii = self . source_eid . print_address ( )
iIooo0O0O0OO = self . target_eid . print_address ( )
oOooOOoO = lisp_hex_string ( self . nonce ) + IiIiii + iIooo0O0O0OO
self . map_request_signature = privkey . sign ( oOooOOoO )
o0o000OOO = binascii . b2a_base64 ( self . map_request_signature )
o0o000OOO = { "source-eid" : IiIiii , "signature-eid" : oo0o0Oo ,
"signature" : o0o000OOO }
return ( json . dumps ( o0o000OOO ) )
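# sign_map_request() above signs the concatenation of the hex nonce, the
# source-EID and the target-EID with the ECDSA private key and returns a JSON
# object carrying "source-eid", "signature-eid" and the base64 signature.
# verify_map_request_sig() below rebuilds the same string and checks it with
# ecdsa.VerifyingKey.verify(), treating any exception as a failed check.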
if 36 - 36: I1Ii111 * I1Ii111 % I1IiiI % O0 . I1IiiI % OoooooooOO
if 96 - 96: oO0o % iIii1I11I1II1 / iIii1I11I1II1 . iII111i . Ii1I
def verify_map_request_sig ( self , pubkey ) :
iII1I1iIIIiII = green ( self . signature_eid . print_address ( ) , False )
if ( pubkey == None ) :
lprint ( "Public-key not found for signature-EID {}" . format ( iII1I1iIIIiII ) )
return ( False )
if 41 - 41: I1Ii111 - O0 * Oo0Ooo % I1IiiI
if 70 - 70: IiII
IiIiii = self . source_eid . print_address ( )
iIooo0O0O0OO = self . target_eid . print_address ( )
oOooOOoO = lisp_hex_string ( self . nonce ) + IiIiii + iIooo0O0O0OO
pubkey = binascii . a2b_base64 ( pubkey )
if 4 - 4: OOooOOo + i11iIiiIii + I11i
O0OoOOo0o = True
try :
Iiii11 = ecdsa . VerifyingKey . from_pem ( pubkey )
except :
lprint ( "Invalid public-key in mapping system for sig-eid {}" . format ( self . signature_eid . print_address_no_iid ( ) ) )
if 21 - 21: I11i - I1IiiI / OoooooooOO . i1IIi + II111iiii
O0OoOOo0o = False
if 99 - 99: I1Ii111 - I1ii11iIi11i - I1IiiI - I1Ii111 + OoO0O00 + II111iiii
if 34 - 34: I1Ii111 * I11i
if ( O0OoOOo0o ) :
try :
O0OoOOo0o = Iiii11 . verify ( self . map_request_signature , oOooOOoO )
except :
O0OoOOo0o = False
if 31 - 31: IiII . oO0o
if 40 - 40: Ii1I - I11i / II111iiii * i1IIi + IiII * II111iiii
if 53 - 53: I1ii11iIi11i - i11iIiiIii . OoO0O00 / OoOoOO00 - I1Ii111
O0O0oooo = bold ( "passed" if O0OoOOo0o else "failed" , False )
lprint ( "Signature verification {} for EID {}" . format ( O0O0oooo , iII1I1iIIIiII ) )
return ( O0OoOOo0o )
if 90 - 90: OOooOOo . OoOoOO00 . I1IiiI . IiII
if 52 - 52: Ii1I - Oo0Ooo
def encode ( self , probe_dest , probe_port ) :
O0oooOO = ( LISP_MAP_REQUEST << 28 ) | self . record_count
O0oooOO = O0oooOO | ( self . itr_rloc_count << 8 )
if ( self . auth_bit ) : O0oooOO |= 0x08000000
if ( self . map_data_present ) : O0oooOO |= 0x04000000
if ( self . rloc_probe ) : O0oooOO |= 0x02000000
if ( self . smr_bit ) : O0oooOO |= 0x01000000
if ( self . pitr_bit ) : O0oooOO |= 0x00800000
if ( self . smr_invoked_bit ) : O0oooOO |= 0x00400000
if ( self . mobile_node ) : O0oooOO |= 0x00200000
if ( self . xtr_id_present ) : O0oooOO |= 0x00100000
if ( self . local_xtr ) : O0oooOO |= 0x00004000
if ( self . dont_reply_bit ) : O0oooOO |= 0x00002000
if 48 - 48: iIii1I11I1II1 * i11iIiiIii / OoO0O00 / I1IiiI
oOo = struct . pack ( "I" , socket . htonl ( O0oooOO ) )
oOo += struct . pack ( "Q" , self . nonce )
if 93 - 93: oO0o
if 57 - 57: I11i . iIii1I11I1II1 + I11i . IiII + IiII
if 53 - 53: I1ii11iIi11i / iII111i - I1ii11iIi11i * OoO0O00
if 81 - 81: I1Ii111 - Oo0Ooo / II111iiii / Oo0Ooo / i1IIi . o0oOOo0O0Ooo
if 38 - 38: OoooooooOO / OoooooooOO % iIii1I11I1II1 % OoooooooOO * OoooooooOO + OoO0O00
if 66 - 66: i1IIi
I11I11i1 = False
O0Oo00o0oO = self . privkey_filename
if ( O0Oo00o0oO != None and os . path . exists ( O0Oo00o0oO ) ) :
Iiooo000o0OoOo = open ( O0Oo00o0oO , "r" ) ; Iiii11 = Iiooo000o0OoOo . read ( ) ; Iiooo000o0OoOo . close ( )
try :
Iiii11 = ecdsa . SigningKey . from_pem ( Iiii11 )
except :
return ( None )
if 76 - 76: Ii1I % iIii1I11I1II1 / oO0o * iIii1I11I1II1 / iIii1I11I1II1
I1ii = self . sign_map_request ( Iiii11 )
I11I11i1 = True
elif ( self . map_request_signature != None ) :
o0o000OOO = binascii . b2a_base64 ( self . map_request_signature )
I1ii = { "source-eid" : self . source_eid . print_address ( ) ,
"signature-eid" : self . signature_eid . print_address ( ) ,
"signature" : o0o000OOO }
I1ii = json . dumps ( I1ii )
I11I11i1 = True
if 26 - 26: OOooOOo . OoO0O00 % OoOoOO00
if ( I11I11i1 ) :
o0O00o0o = LISP_LCAF_JSON_TYPE
ooOO0o0ooOo0 = socket . htons ( LISP_AFI_LCAF )
i11iii11 = socket . htons ( len ( I1ii ) + 2 )
I11111i = socket . htons ( len ( I1ii ) )
oOo += struct . pack ( "HBBBBHH" , ooOO0o0ooOo0 , 0 , 0 , o0O00o0o , 0 ,
i11iii11 , I11111i )
oOo += I1ii
oOo += struct . pack ( "H" , 0 )
else :
if ( self . source_eid . instance_id != 0 ) :
oOo += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
oOo += self . source_eid . lcaf_encode_iid ( )
else :
oOo += struct . pack ( "H" , socket . htons ( self . source_eid . afi ) )
oOo += self . source_eid . pack_address ( )
if 46 - 46: OoooooooOO
if 80 - 80: O0 * iII111i
if 73 - 73: IiII / Ii1I + I1Ii111 . OOooOOo - II111iiii / iIii1I11I1II1
if 79 - 79: I1Ii111 * Oo0Ooo . o0oOOo0O0Ooo - I1Ii111
if 16 - 16: I1IiiI - O0 * I1ii11iIi11i . I1ii11iIi11i % OOooOOo
if 39 - 39: II111iiii / I11i - OoOoOO00 * OoOoOO00 - Ii1I
if 8 - 8: O0 . i11iIiiIii
if ( probe_dest ) :
if ( probe_port == 0 ) : probe_port = LISP_DATA_PORT
ooOOo0o = probe_dest . print_address_no_iid ( ) + ":" + str ( probe_port )
if 54 - 54: OOooOOo . I1ii11iIi11i * I11i % I1Ii111 . O0 * IiII
if ( lisp_crypto_keys_by_rloc_encap . has_key ( ooOOo0o ) ) :
self . keys = lisp_crypto_keys_by_rloc_encap [ ooOOo0o ]
if 87 - 87: Ii1I % I1ii11iIi11i * Oo0Ooo
if 59 - 59: Oo0Ooo / I11i - iIii1I11I1II1 * iIii1I11I1II1
if 18 - 18: I11i * I1ii11iIi11i / i11iIiiIii / iIii1I11I1II1 * OoooooooOO . OOooOOo
if 69 - 69: Oo0Ooo * ooOoO0o
if 91 - 91: o0oOOo0O0Ooo . ooOoO0o / OoO0O00 / i11iIiiIii * o0oOOo0O0Ooo
if 52 - 52: I1IiiI - i11iIiiIii / IiII . oO0o
if 38 - 38: oO0o + OoooooooOO * OoOoOO00 % oO0o
for OooOoOOo0 in self . itr_rlocs :
if ( lisp_data_plane_security and self . itr_rlocs . index ( OooOoOOo0 ) == 0 ) :
if ( self . keys == None or self . keys [ 1 ] == None ) :
i1iIi = lisp_keys ( 1 )
self . keys = [ None , i1iIi , None , None ]
if 91 - 91: i1IIi - I1ii11iIi11i * I1IiiI
i1iIi = self . keys [ 1 ]
i1iIi . add_key_by_nonce ( self . nonce )
oOo += i1iIi . encode_lcaf ( OooOoOOo0 )
else :
oOo += struct . pack ( "H" , socket . htons ( OooOoOOo0 . afi ) )
oOo += OooOoOOo0 . pack_address ( )
if 24 - 24: OoOoOO00 * Ii1I
if 17 - 17: OoO0O00 . I1IiiI * O0
if 81 - 81: OOooOOo
Ooo = 0 if self . target_eid . is_binary ( ) == False else self . target_eid . mask_len
if 93 - 93: i1IIi % I1IiiI . I11i % OoO0O00 + I11i + OoooooooOO
if 41 - 41: OOooOOo % i11iIiiIii * I1IiiI + o0oOOo0O0Ooo / oO0o
ooO0i11i1i1i = 0
if ( self . subscribe_bit ) :
ooO0i11i1i1i = 0x80
self . xtr_id_present = True
if ( self . xtr_id == None ) :
self . xtr_id = random . randint ( 0 , ( 2 ** 128 ) - 1 )
if 83 - 83: II111iiii + IiII - o0oOOo0O0Ooo % o0oOOo0O0Ooo * o0oOOo0O0Ooo
if 100 - 100: Ii1I . iIii1I11I1II1
if 33 - 33: I1IiiI . iIii1I11I1II1 / i11iIiiIii * Ii1I
IIiI1I11ii1i = "BB"
oOo += struct . pack ( IIiI1I11ii1i , ooO0i11i1i1i , Ooo )
if 18 - 18: OoOoOO00 * OoOoOO00 - o0oOOo0O0Ooo % ooOoO0o % II111iiii - IiII
if ( self . target_group . is_null ( ) == False ) :
oOo += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
oOo += self . target_eid . lcaf_encode_sg ( self . target_group )
elif ( self . target_eid . instance_id != 0 or
self . target_eid . is_geo_prefix ( ) ) :
oOo += struct . pack ( "H" , socket . htons ( LISP_AFI_LCAF ) )
oOo += self . target_eid . lcaf_encode_iid ( )
else :
oOo += struct . pack ( "H" , socket . htons ( self . target_eid . afi ) )
oOo += self . target_eid . pack_address ( )
if 75 - 75: OoO0O00 . II111iiii . oO0o / OoO0O00 % iIii1I11I1II1
if 8 - 8: O0 / II111iiii
if 62 - 62: iIii1I11I1II1 % I1Ii111 % I1ii11iIi11i * IiII
if 87 - 87: IiII
if 45 - 45: oO0o + II111iiii * O0 % OOooOOo . iIii1I11I1II1
if ( self . subscribe_bit ) : oOo = self . encode_xtr_id ( oOo )
return ( oOo )
if 55 - 55: IiII
if 43 - 43: OOooOOo
def lcaf_decode_json ( self , packet ) :
IIiI1I11ii1i = "BBBBHH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 17 - 17: i11iIiiIii
OoO0oOoo , I11I , o0O00o0o , ii11iIII111 , i11iii11 , I11111i = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 86 - 86: IiII - IiII
if 51 - 51: IiII % iII111i / I11i + oO0o - ooOoO0o * I1Ii111
if ( o0O00o0o != LISP_LCAF_JSON_TYPE ) : return ( packet )
if 76 - 76: OoOoOO00 / I1ii11iIi11i / iIii1I11I1II1 * OoooooooOO * OOooOOo
if 80 - 80: oO0o / O0
if 55 - 55: I1IiiI * I11i / O0 % OoOoOO00
if 71 - 71: i11iIiiIii * OoOoOO00 * OOooOOo + oO0o + Oo0Ooo
i11iii11 = socket . ntohs ( i11iii11 )
I11111i = socket . ntohs ( I11111i )
packet = packet [ i1II1i1iiI1 : : ]
if ( len ( packet ) < i11iii11 ) : return ( None )
if ( i11iii11 != I11111i + 2 ) : return ( None )
if 59 - 59: IiII
if 54 - 54: OOooOOo
if 27 - 27: OoOoOO00 - OoO0O00 + o0oOOo0O0Ooo + ooOoO0o . OoO0O00
if 86 - 86: II111iiii - OoooooooOO - ooOoO0o % iII111i
try :
I1ii = json . loads ( packet [ 0 : I11111i ] )
except :
return ( None )
if 16 - 16: ooOoO0o + Oo0Ooo + OoooooooOO
packet = packet [ I11111i : : ]
if 87 - 87: I1IiiI . oO0o / IiII - OoooooooOO
if 33 - 33: oO0o % OoO0O00 . iIii1I11I1II1 / IiII
if 3 - 3: Ii1I + OoO0O00
if 60 - 60: OoO0O00 . OoOoOO00 - I1ii11iIi11i - I1IiiI - II111iiii % Oo0Ooo
IIiI1I11ii1i = "H"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
o0o0O00oOo = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
if ( o0o0O00oOo != 0 ) : return ( packet )
if 62 - 62: O0 + iII111i - iII111i % iIii1I11I1II1
if 47 - 47: I1Ii111 + I1IiiI
if 40 - 40: iIii1I11I1II1 % Ii1I + II111iiii - I1IiiI
if 80 - 80: oO0o
if ( I1ii . has_key ( "source-eid" ) == False ) : return ( packet )
Oo00o = I1ii [ "source-eid" ]
o0o0O00oOo = LISP_AFI_IPV4 if Oo00o . count ( "." ) == 3 else LISP_AFI_IPV6 if Oo00o . count ( ":" ) == 7 else None
if 14 - 14: II111iiii + O0 - iII111i
if ( o0o0O00oOo == None ) :
lprint ( "Bad JSON 'source-eid' value: {}" . format ( Oo00o ) )
return ( None )
if 18 - 18: o0oOOo0O0Ooo / i11iIiiIii % I1ii11iIi11i * OoooooooOO
if 67 - 67: OoOoOO00
self . source_eid . afi = o0o0O00oOo
self . source_eid . store_address ( Oo00o )
if 67 - 67: I1ii11iIi11i / iII111i + iIii1I11I1II1 % I1IiiI
if ( I1ii . has_key ( "signature-eid" ) == False ) : return ( packet )
Oo00o = I1ii [ "signature-eid" ]
if ( Oo00o . count ( ":" ) != 7 ) :
lprint ( "Bad JSON 'signature-eid' value: {}" . format ( Oo00o ) )
return ( None )
if 99 - 99: ooOoO0o . Ii1I
if 92 - 92: i1IIi
self . signature_eid . afi = LISP_AFI_IPV6
self . signature_eid . store_address ( Oo00o )
if 68 - 68: OoO0O00 % IiII - oO0o - ooOoO0o . Oo0Ooo
if ( I1ii . has_key ( "signature" ) == False ) : return ( packet )
o0o000OOO = binascii . a2b_base64 ( I1ii [ "signature" ] )
self . map_request_signature = o0o000OOO
return ( packet )
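# lcaf_decode_json() above handles the LCAF JSON Type used by signed
# Map-Requests: it checks that the LCAF length equals the JSON length plus the
# trailing 2-byte AFI (which must be zero), json.loads() the payload, and then
# stores "source-eid" (IPv4 if it has three dots, IPv6 if seven colons),
# "signature-eid" (IPv6 form only) and the base64 "signature" on the object.
# Bad JSON or an unparsable address returns None; a missing key simply returns
# the remaining packet.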
if 30 - 30: OoooooooOO % o0oOOo0O0Ooo + ooOoO0o * OoO0O00
if 57 - 57: I11i + iIii1I11I1II1 . OoO0O00 + oO0o
def decode ( self , packet , source , port ) :
IIiI1I11ii1i = "I"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 4 - 4: Ii1I
O0oooOO = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
O0oooOO = O0oooOO [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
if 43 - 43: i1IIi . I1IiiI * iIii1I11I1II1 * i11iIiiIii - OOooOOo + ooOoO0o
IIiI1I11ii1i = "Q"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 56 - 56: Oo0Ooo % i11iIiiIii / Ii1I . I1Ii111 . OoO0O00 - OoOoOO00
i11III1I = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
packet = packet [ i1II1i1iiI1 : : ]
if 32 - 32: I1Ii111 / oO0o / I1IiiI
O0oooOO = socket . ntohl ( O0oooOO )
self . auth_bit = True if ( O0oooOO & 0x08000000 ) else False
self . map_data_present = True if ( O0oooOO & 0x04000000 ) else False
self . rloc_probe = True if ( O0oooOO & 0x02000000 ) else False
self . smr_bit = True if ( O0oooOO & 0x01000000 ) else False
self . pitr_bit = True if ( O0oooOO & 0x00800000 ) else False
self . smr_invoked_bit = True if ( O0oooOO & 0x00400000 ) else False
self . mobile_node = True if ( O0oooOO & 0x00200000 ) else False
self . xtr_id_present = True if ( O0oooOO & 0x00100000 ) else False
self . local_xtr = True if ( O0oooOO & 0x00004000 ) else False
self . dont_reply_bit = True if ( O0oooOO & 0x00002000 ) else False
self . itr_rloc_count = ( ( O0oooOO >> 8 ) & 0x1f ) + 1
self . record_count = O0oooOO & 0xff
self . nonce = i11III1I [ 0 ]
if 22 - 22: OoO0O00 - OoOoOO00 . Oo0Ooo + o0oOOo0O0Ooo
if 69 - 69: oO0o - I1IiiI
if 10 - 10: i1IIi / iII111i . II111iiii * i1IIi % OoooooooOO
if 83 - 83: I11i . OOooOOo + I1Ii111 * I11i . I1Ii111 + oO0o
if ( self . xtr_id_present ) :
if ( self . decode_xtr_id ( packet ) == False ) : return ( None )
if 64 - 64: Ii1I . o0oOOo0O0Ooo - i1IIi
if 35 - 35: I1ii11iIi11i % OoooooooOO
i1II1i1iiI1 = struct . calcsize ( "H" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 59 - 59: I1IiiI % I11i
o0o0O00oOo = struct . unpack ( "H" , packet [ : i1II1i1iiI1 ] )
self . source_eid . afi = socket . ntohs ( o0o0O00oOo [ 0 ] )
packet = packet [ i1II1i1iiI1 : : ]
if 32 - 32: I1IiiI * O0 + O0
if ( self . source_eid . afi == LISP_AFI_LCAF ) :
iiiiIiI1IIiI = packet
packet = self . source_eid . lcaf_decode_iid ( packet )
if ( packet == None ) :
packet = self . lcaf_decode_json ( iiiiIiI1IIiI )
if ( packet == None ) : return ( None )
if 53 - 53: iIii1I11I1II1 % OoOoOO00 % I1IiiI + I1ii11iIi11i % OoooooooOO
elif ( self . source_eid . afi != LISP_AFI_NONE ) :
packet = self . source_eid . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 29 - 29: I1IiiI / o0oOOo0O0Ooo + iIii1I11I1II1 / O0 / OOooOOo % i1IIi
self . source_eid . mask_len = self . source_eid . host_mask_len ( )
if 65 - 65: OoO0O00 * OoOoOO00 . OoooooooOO - O0 * OoOoOO00 % OoOoOO00
IiiiIii = ( os . getenv ( "LISP_NO_CRYPTO" ) != None )
self . itr_rlocs = [ ]
while ( self . itr_rloc_count != 0 ) :
i1II1i1iiI1 = struct . calcsize ( "H" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 67 - 67: OoOoOO00 % o0oOOo0O0Ooo % I1ii11iIi11i . OoO0O00 . II111iiii + IiII
o0o0O00oOo = struct . unpack ( "H" , packet [ : i1II1i1iiI1 ] ) [ 0 ]
if 50 - 50: Oo0Ooo + iII111i . O0 - i1IIi / Oo0Ooo
OooOoOOo0 = lisp_address ( LISP_AFI_NONE , "" , 32 , 0 )
OooOoOOo0 . afi = socket . ntohs ( o0o0O00oOo )
if 59 - 59: oO0o * ooOoO0o + oO0o + I1ii11iIi11i
if 80 - 80: o0oOOo0O0Ooo . OoOoOO00 - o0oOOo0O0Ooo - I1Ii111 / I11i
if 17 - 17: OoO0O00 - I1Ii111 - II111iiii / I1Ii111 / Ii1I
if 30 - 30: OOooOOo * I1ii11iIi11i % I1ii11iIi11i + iII111i * IiII
if 33 - 33: o0oOOo0O0Ooo + I11i * O0 * OoO0O00 . I1ii11iIi11i
if ( OooOoOOo0 . afi != LISP_AFI_LCAF ) :
if ( len ( packet ) < OooOoOOo0 . addr_length ( ) ) : return ( None )
packet = OooOoOOo0 . unpack_address ( packet [ i1II1i1iiI1 : : ] )
if ( packet == None ) : return ( None )
if 74 - 74: iII111i * iII111i * o0oOOo0O0Ooo / oO0o
if ( IiiiIii ) :
self . itr_rlocs . append ( OooOoOOo0 )
self . itr_rloc_count -= 1
continue
if 91 - 91: i11iIiiIii . I1ii11iIi11i / II111iiii
if 97 - 97: Ii1I % i1IIi % IiII + Oo0Ooo - O0 - I11i
ooOOo0o = lisp_build_crypto_decap_lookup_key ( OooOoOOo0 , port )
if 64 - 64: Ii1I - iII111i
if 12 - 12: i1IIi
if 99 - 99: II111iiii - I1ii11iIi11i * IiII
if 3 - 3: IiII - I1ii11iIi11i * iII111i * I1ii11iIi11i + Oo0Ooo
if 15 - 15: I1ii11iIi11i * Ii1I / iII111i . o0oOOo0O0Ooo / Ii1I % OoOoOO00
if ( lisp_nat_traversal and OooOoOOo0 . is_private_address ( ) and source ) : OooOoOOo0 = source
if 75 - 75: OoooooooOO % i11iIiiIii % iIii1I11I1II1 % I1ii11iIi11i / i11iIiiIii
O0oOoo0ooO00 = lisp_crypto_keys_by_rloc_decap
if ( O0oOoo0ooO00 . has_key ( ooOOo0o ) ) : O0oOoo0ooO00 . pop ( ooOOo0o )
if 86 - 86: I1IiiI . II111iiii * i1IIi % I1IiiI . OOooOOo
if 79 - 79: OoO0O00 + O0 * OOooOOo
if 51 - 51: i1IIi - oO0o / oO0o % o0oOOo0O0Ooo
if 98 - 98: OoO0O00 * ooOoO0o + i1IIi + IiII - i1IIi % OoOoOO00
if 19 - 19: iIii1I11I1II1 * Oo0Ooo / OOooOOo
if 5 - 5: o0oOOo0O0Ooo
lisp_write_ipc_decap_key ( ooOOo0o , None )
else :
oOO = packet
i1II1Ii = lisp_keys ( 1 )
packet = i1II1Ii . decode_lcaf ( oOO , 0 )
if ( packet == None ) : return ( None )
if 92 - 92: Oo0Ooo - II111iiii
if 7 - 7: i11iIiiIii + ooOoO0o . I1Ii111 + i1IIi - o0oOOo0O0Ooo
if 82 - 82: II111iiii + ooOoO0o * OOooOOo . iIii1I11I1II1 - i11iIiiIii * iIii1I11I1II1
if 42 - 42: o0oOOo0O0Ooo * oO0o . OOooOOo
iii1IiI = [ LISP_CS_25519_CBC , LISP_CS_25519_GCM ,
LISP_CS_25519_CHACHA ]
if ( i1II1Ii . cipher_suite in iii1IiI ) :
if ( i1II1Ii . cipher_suite == LISP_CS_25519_CBC or
i1II1Ii . cipher_suite == LISP_CS_25519_GCM ) :
Iiii11 = lisp_keys ( 1 , do_poly = False , do_chacha = False )
if 46 - 46: I1ii11iIi11i - I1Ii111 % I1ii11iIi11i - i11iIiiIii
if ( i1II1Ii . cipher_suite == LISP_CS_25519_CHACHA ) :
Iiii11 = lisp_keys ( 1 , do_poly = True , do_chacha = True )
if 50 - 50: I1Ii111 % IiII
else :
Iiii11 = lisp_keys ( 1 , do_poly = False , do_curve = False ,
do_chacha = False )
if 63 - 63: OoooooooOO . Ii1I - oO0o / II111iiii + I1IiiI
packet = Iiii11 . decode_lcaf ( oOO , 0 )
if ( packet == None ) : return ( None )
if 97 - 97: I11i
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
o0o0O00oOo = struct . unpack ( "H" , packet [ : i1II1i1iiI1 ] ) [ 0 ]
OooOoOOo0 . afi = socket . ntohs ( o0o0O00oOo )
if ( len ( packet ) < OooOoOOo0 . addr_length ( ) ) : return ( None )
if 84 - 84: IiII - OoOoOO00 . IiII + ooOoO0o . iII111i
packet = OooOoOOo0 . unpack_address ( packet [ i1II1i1iiI1 : : ] )
if ( packet == None ) : return ( None )
if 96 - 96: Ii1I % iII111i * Ii1I % I1IiiI . o0oOOo0O0Ooo / o0oOOo0O0Ooo
if ( IiiiIii ) :
self . itr_rlocs . append ( OooOoOOo0 )
self . itr_rloc_count -= 1
continue
if 7 - 7: OoO0O00 - ooOoO0o % i1IIi
if 24 - 24: OoO0O00 % O0 % I11i
ooOOo0o = lisp_build_crypto_decap_lookup_key ( OooOoOOo0 , port )
if 61 - 61: ooOoO0o . iII111i / ooOoO0o * OoooooooOO
iiiiI = None
if ( lisp_nat_traversal and OooOoOOo0 . is_private_address ( ) and source ) : OooOoOOo0 = source
if 18 - 18: oO0o * Oo0Ooo % i11iIiiIii + O0 % OOooOOo . OOooOOo
if 84 - 84: OoooooooOO - Oo0Ooo
if ( lisp_crypto_keys_by_rloc_decap . has_key ( ooOOo0o ) ) :
i1iIi = lisp_crypto_keys_by_rloc_decap [ ooOOo0o ]
iiiiI = i1iIi [ 1 ] if i1iIi and i1iIi [ 1 ] else None
if 79 - 79: O0 - oO0o + oO0o . Ii1I . OoOoOO00 - I1ii11iIi11i
if 74 - 74: ooOoO0o % I1ii11iIi11i * i1IIi
iiiii1I = True
if ( iiiiI ) :
if ( iiiiI . compare_keys ( Iiii11 ) ) :
self . keys = [ None , iiiiI , None , None ]
lprint ( "Maintain stored decap-keys for RLOC {}" . format ( red ( ooOOo0o , False ) ) )
if 22 - 22: iII111i . OoooooooOO . Oo0Ooo
else :
iiiii1I = False
IIiI = bold ( "Remote decap-rekeying" , False )
lprint ( "{} for RLOC {}" . format ( IIiI , red ( ooOOo0o ,
False ) ) )
Iiii11 . copy_keypair ( iiiiI )
Iiii11 . uptime = iiiiI . uptime
iiiiI = None
if 70 - 70: OoooooooOO * i11iIiiIii
if 60 - 60: IiII / iIii1I11I1II1 + OoooooooOO - I1ii11iIi11i * i11iIiiIii
if 47 - 47: O0 . I1IiiI / ooOoO0o % i11iIiiIii
if ( iiiiI == None ) :
self . keys = [ None , Iiii11 , None , None ]
if ( lisp_i_am_etr == False and lisp_i_am_rtr == False ) :
Iiii11 . local_public_key = None
lprint ( "{} for {}" . format ( bold ( "Ignoring decap-keys" ,
False ) , red ( ooOOo0o , False ) ) )
elif ( Iiii11 . remote_public_key != None ) :
if ( iiiii1I ) :
lprint ( "{} for RLOC {}" . format ( bold ( "New decap-keying" , False ) ,
red ( ooOOo0o , False ) ) )
if 17 - 17: o0oOOo0O0Ooo
Iiii11 . compute_shared_key ( "decap" )
Iiii11 . add_key_by_rloc ( ooOOo0o , False )
if 39 - 39: o0oOOo0O0Ooo
if 89 - 89: OoooooooOO + iII111i . I1Ii111 / Ii1I
if 75 - 75: iIii1I11I1II1 * iII111i / OoOoOO00 * II111iiii . i1IIi
if 6 - 6: Ii1I % Ii1I / OoooooooOO * oO0o . I1IiiI . i1IIi
self . itr_rlocs . append ( OooOoOOo0 )
self . itr_rloc_count -= 1
if 59 - 59: I11i . I11i * I1IiiI - Ii1I % OoOoOO00
if 19 - 19: OoooooooOO / Oo0Ooo - I1Ii111 . OoOoOO00
i1II1i1iiI1 = struct . calcsize ( "BBH" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 8 - 8: I11i % ooOoO0o . iIii1I11I1II1
ooO0i11i1i1i , Ooo , o0o0O00oOo = struct . unpack ( "BBH" , packet [ : i1II1i1iiI1 ] )
self . subscribe_bit = ( ooO0i11i1i1i & 0x80 )
self . target_eid . afi = socket . ntohs ( o0o0O00oOo )
packet = packet [ i1II1i1iiI1 : : ]
if 95 - 95: o0oOOo0O0Ooo + i11iIiiIii . I1ii11iIi11i . ooOoO0o . o0oOOo0O0Ooo
self . target_eid . mask_len = Ooo
if ( self . target_eid . afi == LISP_AFI_LCAF ) :
packet , oOO0oOOOOO0 = self . target_eid . lcaf_decode_eid ( packet )
if ( packet == None ) : return ( None )
if ( oOO0oOOOOO0 ) : self . target_group = oOO0oOOOOO0
else :
packet = self . target_eid . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = packet [ i1II1i1iiI1 : : ]
if 99 - 99: OoOoOO00 . I1Ii111 * II111iiii - i11iIiiIii + I11i
return ( packet )
if 44 - 44: ooOoO0o * i11iIiiIii . iII111i / iIii1I11I1II1
if 44 - 44: OoO0O00
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . target_eid , self . target_group ) )
if 74 - 74: Ii1I * i1IIi * I11i - OoooooooOO . I1IiiI
if 24 - 24: II111iiii - i11iIiiIii * i1IIi . ooOoO0o
def encode_xtr_id ( self , packet ) :
o0oo0oo0 = self . xtr_id >> 64
IIi1II = self . xtr_id & 0xffffffffffffffff
o0oo0oo0 = byte_swap_64 ( o0oo0oo0 )
IIi1II = byte_swap_64 ( IIi1II )
packet += struct . pack ( "QQ" , o0oo0oo0 , IIi1II )
return ( packet )
if 42 - 42: I11i / i11iIiiIii
if 7 - 7: I11i
def decode_xtr_id ( self , packet ) :
i1II1i1iiI1 = struct . calcsize ( "QQ" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
packet = packet [ len ( packet ) - i1II1i1iiI1 : : ]
o0oo0oo0 , IIi1II = struct . unpack ( "QQ" , packet [ : i1II1i1iiI1 ] )
o0oo0oo0 = byte_swap_64 ( o0oo0oo0 )
IIi1II = byte_swap_64 ( IIi1II )
self . xtr_id = ( o0oo0oo0 << 64 ) | IIi1II
return ( True )
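#
# A minimal sketch of the first 32-bit word of a Map-Request as built by
# encode() and parsed by decode() above: the type occupies the top 4 bits,
# the single-bit flags follow, bits 8-12 carry the ITR-RLOC count minus one,
# and the low byte is the record count. The helper and its flag names are
# hypothetical; the bit positions are the ones used above.
#
def _sketch_map_request_first_word(record_count, itr_rloc_count_minus_1, flags):
    word = (1 << 28) | (record_count & 0xff)          # 1 = Map-Request type
    word |= (itr_rloc_count_minus_1 & 0x1f) << 8
    bit_for = {
        "auth": 0x08000000, "map-data": 0x04000000, "rloc-probe": 0x02000000,
        "smr": 0x01000000, "pitr": 0x00800000, "smr-invoked": 0x00400000,
        "mobile-node": 0x00200000, "xtr-id": 0x00100000,
        "local-xtr": 0x00004000, "dont-reply": 0x00002000,
    }
    for name in bit_for:
        if (flags.get(name)): word |= bit_for[name]
    return (word)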
class lisp_map_reply ( ) :
def __init__ ( self ) :
self . rloc_probe = False
self . echo_nonce_capable = False
self . security = False
self . record_count = 0
self . hop_count = 0
self . nonce = 0
self . keys = None
if 45 - 45: oO0o / iII111i + I1ii11iIi11i - Oo0Ooo - ooOoO0o . iIii1I11I1II1
if 52 - 52: I1IiiI + i1IIi . iII111i * I1IiiI
def print_map_reply ( self ) :
IIIIIiI11Ii = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + "nonce: 0x{}"
if 31 - 31: Oo0Ooo % iIii1I11I1II1 . O0
lprint ( IIIIIiI11Ii . format ( bold ( "Map-Reply" , False ) , "R" if self . rloc_probe else "r" ,
"E" if self . echo_nonce_capable else "e" ,
"S" if self . security else "s" , self . hop_count , self . record_count ,
lisp_hex_string ( self . nonce ) ) )
if 28 - 28: Ii1I % iIii1I11I1II1
if 72 - 72: I1ii11iIi11i / OoOoOO00 - i11iIiiIii
def encode ( self ) :
O0oooOO = ( LISP_MAP_REPLY << 28 ) | self . record_count
O0oooOO |= self . hop_count << 8
if ( self . rloc_probe ) : O0oooOO |= 0x08000000
if ( self . echo_nonce_capable ) : O0oooOO |= 0x04000000
if ( self . security ) : O0oooOO |= 0x02000000
if 67 - 67: OOooOOo / Ii1I
oOo = struct . pack ( "I" , socket . htonl ( O0oooOO ) )
oOo += struct . pack ( "Q" , self . nonce )
return ( oOo )
if 51 - 51: I11i % II111iiii - o0oOOo0O0Ooo % OoO0O00 * i11iIiiIii * iII111i
if 82 - 82: OoooooooOO / I1IiiI * II111iiii - OoooooooOO % iIii1I11I1II1 * OoO0O00
def decode ( self , packet ) :
IIiI1I11ii1i = "I"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 32 - 32: i11iIiiIii - OoOoOO00 * I11i . Oo0Ooo * ooOoO0o
O0oooOO = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
O0oooOO = O0oooOO [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
if 21 - 21: OOooOOo
IIiI1I11ii1i = "Q"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 11 - 11: oO0o % i11iIiiIii * O0
i11III1I = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
packet = packet [ i1II1i1iiI1 : : ]
if 28 - 28: I1Ii111 / iIii1I11I1II1 + OOooOOo . I1ii11iIi11i % OOooOOo + OoO0O00
O0oooOO = socket . ntohl ( O0oooOO )
self . rloc_probe = True if ( O0oooOO & 0x08000000 ) else False
self . echo_nonce_capable = True if ( O0oooOO & 0x04000000 ) else False
self . security = True if ( O0oooOO & 0x02000000 ) else False
self . hop_count = ( O0oooOO >> 8 ) & 0xff
self . record_count = O0oooOO & 0xff
self . nonce = i11III1I [ 0 ]
if 79 - 79: oO0o
if ( lisp_crypto_keys_by_nonce . has_key ( self . nonce ) ) :
self . keys = lisp_crypto_keys_by_nonce [ self . nonce ]
self . keys [ 1 ] . delete_key_by_nonce ( self . nonce )
if 39 - 39: I1Ii111 % oO0o % O0 % O0 - iII111i - oO0o
return ( packet )
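#
# Added annotation (assumption from the surrounding code): lisp_eid_record
# appears to encode and decode one EID-record as used in Map-Reply,
# Map-Register, and Map-Referral messages: record TTL, RLOC count, action bits,
# authoritative and ddt-incomplete flags, signature count, map-version, and the
# EID-prefix itself (plain AFI, LCAF with instance-ID, (S,G) multicast, or
# geo-coordinate forms).
#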
class lisp_eid_record ( ) :
def __init__ ( self ) :
self . record_ttl = 0
self . rloc_count = 0
self . action = 0
self . authoritative = False
self . ddt_incomplete = False
self . signature_count = 0
self . map_version = 0
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . record_ttl = 0
if 79 - 79: I1IiiI / o0oOOo0O0Ooo . Ii1I * I1ii11iIi11i + I11i
if 96 - 96: OoO0O00 * II111iiii
def print_prefix ( self ) :
if ( self . group . is_null ( ) ) :
return ( green ( self . eid . print_prefix ( ) , False ) )
if 1 - 1: I1IiiI - OoOoOO00
return ( green ( self . eid . print_sg ( self . group ) , False ) )
if 74 - 74: OoOoOO00 * II111iiii + O0 + I11i
if 3 - 3: iIii1I11I1II1 - i1IIi / iII111i + i1IIi + O0
def print_ttl ( self ) :
Ii1 = self . record_ttl
if ( self . record_ttl & 0x80000000 ) :
Ii1 = str ( self . record_ttl & 0x7fffffff ) + " secs"
elif ( ( Ii1 % 60 ) == 0 ) :
Ii1 = str ( Ii1 / 60 ) + " hours"
else :
Ii1 = str ( Ii1 ) + " mins"
if 84 - 84: OoOoOO00 - ooOoO0o - OoooooooOO . OoooooooOO % IiII
return ( Ii1 )
if 38 - 38: OoO0O00 * I1ii11iIi11i
if 4 - 4: OoO0O00 . I1ii11iIi11i
def store_ttl ( self ) :
Ii1 = self . record_ttl * 60
if ( self . record_ttl & 0x80000000 ) : Ii1 = self . record_ttl & 0x7fffffff
return ( Ii1 )
if 21 - 21: i11iIiiIii / OoO0O00 / I1ii11iIi11i * O0 - II111iiii * OOooOOo
if 27 - 27: o0oOOo0O0Ooo . OoOoOO00 * Ii1I * iII111i * O0
def print_record ( self , indent , ddt ) :
o000ooo0o0O = ""
iiiI11iiI11 = ""
iII1i1iIi11I = bold ( "invalid-action" , False )
if ( ddt ) :
if ( self . action < len ( lisp_map_referral_action_string ) ) :
iII1i1iIi11I = lisp_map_referral_action_string [ self . action ]
iII1i1iIi11I = bold ( iII1i1iIi11I , False )
o000ooo0o0O = ( ", " + bold ( "ddt-incomplete" , False ) ) if self . ddt_incomplete else ""
if 55 - 55: IiII
iiiI11iiI11 = ( ", sig-count: " + str ( self . signature_count ) ) if ( self . signature_count != 0 ) else ""
if 12 - 12: i11iIiiIii + I1ii11iIi11i * OoO0O00
if 13 - 13: Oo0Ooo + OoooooooOO / IiII
else :
if ( self . action < len ( lisp_map_reply_action_string ) ) :
iII1i1iIi11I = lisp_map_reply_action_string [ self . action ]
if ( self . action != LISP_NO_ACTION ) :
iII1i1iIi11I = bold ( iII1i1iIi11I , False )
o0o0O00oOo = LISP_AFI_LCAF if ( self . eid . afi < 0 ) else self . eid . afi
IIIIIiI11Ii = ( "{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}" )
if 23 - 23: OoooooooOO + i11iIiiIii / Oo0Ooo / iII111i . iII111i * I1IiiI
lprint ( IIIIIiI11Ii . format ( indent , self . print_ttl ( ) , self . rloc_count ,
iII1i1iIi11I , "auth" if ( self . authoritative is True ) else "non-auth" ,
o000ooo0o0O , iiiI11iiI11 , self . map_version , o0o0O00oOo ,
green ( self . print_prefix ( ) , False ) ) )
if 98 - 98: IiII
if 23 - 23: I11i / i1IIi * OoO0O00
def encode ( self ) :
O0oo0oo0 = self . action << 13
if ( self . authoritative ) : O0oo0oo0 |= 0x1000
if ( self . ddt_incomplete ) : O0oo0oo0 |= 0x800
o0o0O00oOo = self . eid . afi if ( self . eid . instance_id == 0 ) else LISP_AFI_LCAF
if ( o0o0O00oOo < 0 ) : o0o0O00oOo = LISP_AFI_LCAF
iIi1I = ( self . group . is_null ( ) == False )
if ( iIi1I ) : o0o0O00oOo = LISP_AFI_LCAF
if 60 - 60: I1ii11iIi11i - I1IiiI * O0 * Oo0Ooo . i1IIi . OoOoOO00
i1ii1I1ii111I = ( self . signature_count << 12 ) | self . map_version
Ooo = 0 if self . eid . is_binary ( ) == False else self . eid . mask_len
if 45 - 45: I1ii11iIi11i . I11i . II111iiii - II111iiii * OoooooooOO
oOo = struct . pack ( "IBBHHH" , socket . htonl ( self . record_ttl ) ,
self . rloc_count , Ooo , socket . htons ( O0oo0oo0 ) ,
socket . htons ( i1ii1I1ii111I ) , socket . htons ( o0o0O00oOo ) )
if ( iIi1I ) :
oOo += self . eid . lcaf_encode_sg ( self . group )
return ( oOo )
if ( self . eid . afi == LISP_AFI_GEO_COORD and self . eid . instance_id == 0 ) :
oOo = oOo [ 0 : - 2 ]
oOo += self . eid . address . encode_geo ( )
return ( oOo )
if ( o0o0O00oOo == LISP_AFI_LCAF ) :
oOo += self . eid . lcaf_encode_iid ( )
return ( oOo )
oOo += self . eid . pack_address ( )
return ( oOo )
if 5 - 5: iIii1I11I1II1 * i11iIiiIii + OoO0O00 + I11i * O0 % ooOoO0o
if 88 - 88: o0oOOo0O0Ooo / i11iIiiIii * I1ii11iIi11i
def decode ( self , packet ) :
IIiI1I11ii1i = "IBBHHH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 23 - 23: O0 / iII111i
self . record_ttl , self . rloc_count , self . eid . mask_len , O0oo0oo0 , self . map_version , self . eid . afi = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 66 - 66: i1IIi % OoooooooOO * i11iIiiIii + oO0o * O0 / OoO0O00
if 14 - 14: I1IiiI . IiII
if 29 - 29: OoooooooOO / IiII + OoOoOO00 - I1Ii111 + IiII . i1IIi
self . record_ttl = socket . ntohl ( self . record_ttl )
O0oo0oo0 = socket . ntohs ( O0oo0oo0 )
self . action = ( O0oo0oo0 >> 13 ) & 0x7
self . authoritative = True if ( ( O0oo0oo0 >> 12 ) & 1 ) else False
self . ddt_incomplete = True if ( ( O0oo0oo0 >> 11 ) & 1 ) else False
self . map_version = socket . ntohs ( self . map_version )
self . signature_count = self . map_version >> 12
self . map_version = self . map_version & 0xfff
self . eid . afi = socket . ntohs ( self . eid . afi )
self . eid . instance_id = 0
packet = packet [ i1II1i1iiI1 : : ]
if ( self . eid . afi == LISP_AFI_LCAF ) :
packet , i1i11Ii1 = self . eid . lcaf_decode_eid ( packet )
if ( i1i11Ii1 ) : self . group = i1i11Ii1
self . group . instance_id = self . eid . instance_id
return ( packet )
if 14 - 14: OOooOOo . o0oOOo0O0Ooo / II111iiii % OOooOOo
if 98 - 98: I1IiiI
packet = self . eid . unpack_address ( packet )
return ( packet )
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
if 18 - 18: i11iIiiIii + iIii1I11I1II1 . i11iIiiIii
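#
# Added annotation (assumption from the surrounding code): lisp_ecm appears to
# build and parse the Encapsulated Control Message header: the outer type/flag
# word (S/D/E/M bits) followed by a hand-built inner IPv4 or IPv6 header and an
# inner UDP header wrapped around the control payload. For IPv4 the inner
# header checksum is zeroed before the source/dest addresses are unpacked.
#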
class lisp_ecm ( ) :
def __init__ ( self , sport ) :
self . security = False
self . ddt = False
self . to_etr = False
self . to_ms = False
self . length = 0
self . ttl = LISP_DEFAULT_ECM_TTL
self . protocol = LISP_UDP_PROTOCOL
self . ip_checksum = 0
self . source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . udp_sport = sport
self . udp_dport = LISP_CTRL_PORT
self . udp_checksum = 0
self . udp_length = 0
self . afi = LISP_AFI_NONE
if 63 - 63: iII111i - OoO0O00 * OOooOOo
if 89 - 89: iII111i / Oo0Ooo
def print_ecm ( self ) :
IIIIIiI11Ii = ( "{} -> flags: {}{}{}{}, " + "inner IP: {} -> {}, inner UDP: {} -> {}" )
if 66 - 66: o0oOOo0O0Ooo + OoOoOO00 % OoooooooOO . I11i
lprint ( IIIIIiI11Ii . format ( bold ( "ECM" , False ) , "S" if self . security else "s" ,
"D" if self . ddt else "d" , "E" if self . to_etr else "e" ,
"M" if self . to_ms else "m" ,
green ( self . source . print_address ( ) , False ) ,
green ( self . dest . print_address ( ) , False ) , self . udp_sport ,
self . udp_dport ) )
if 30 - 30: II111iiii - Oo0Ooo - i11iIiiIii + O0
def encode ( self , packet , inner_source , inner_dest ) :
self . udp_length = len ( packet ) + 8
self . source = inner_source
self . dest = inner_dest
if ( inner_dest . is_ipv4 ( ) ) :
self . afi = LISP_AFI_IPV4
self . length = self . udp_length + 20
if 93 - 93: i1IIi + I1Ii111 / OoO0O00 - I11i % Oo0Ooo / Ii1I
if ( inner_dest . is_ipv6 ( ) ) :
self . afi = LISP_AFI_IPV6
self . length = self . udp_length
O0oooOO = ( LISP_ECM << 28 )
if ( self . security ) : O0oooOO |= 0x08000000
if ( self . ddt ) : O0oooOO |= 0x04000000
if ( self . to_etr ) : O0oooOO |= 0x02000000
if ( self . to_ms ) : O0oooOO |= 0x01000000
if 26 - 26: OoooooooOO + oO0o + OoO0O00 . O0
Ii1I111Ii = struct . pack ( "I" , socket . htonl ( O0oooOO ) )
if 92 - 92: o0oOOo0O0Ooo * Ii1I / IiII % Oo0Ooo
oOo00OoO0O = ""
if ( self . afi == LISP_AFI_IPV4 ) :
oOo00OoO0O = struct . pack ( "BBHHHBBH" , 0x45 , 0 , socket . htons ( self . length ) ,
0 , 0 , self . ttl , self . protocol , socket . htons ( self . ip_checksum ) )
oOo00OoO0O += self . source . pack_address ( )
oOo00OoO0O += self . dest . pack_address ( )
oOo00OoO0O = lisp_ip_checksum ( oOo00OoO0O )
if 52 - 52: OoooooooOO + OoO0O00 * i1IIi / i11iIiiIii - I1Ii111
if ( self . afi == LISP_AFI_IPV6 ) :
oOo00OoO0O = struct . pack ( "BBHHBB" , 0x60 , 0 , 0 , socket . htons ( self . length ) ,
self . protocol , self . ttl )
oOo00OoO0O += self . source . pack_address ( )
oOo00OoO0O += self . dest . pack_address ( )
if 81 - 81: O0 % o0oOOo0O0Ooo / Ii1I / ooOoO0o . i11iIiiIii + IiII
if 29 - 29: ooOoO0o
o00oOOO = socket . htons ( self . udp_sport )
i1 = socket . htons ( self . udp_dport )
II1Ooo0000o00OO = socket . htons ( self . udp_length )
iI1I1iII1iII = socket . htons ( self . udp_checksum )
OOOOo00oo00O = struct . pack ( "HHHH" , o00oOOO , i1 , II1Ooo0000o00OO , iI1I1iII1iII )
return ( Ii1I111Ii + oOo00OoO0O + OOOOo00oo00O )
if 70 - 70: oO0o . O0 % I11i % IiII - I11i * I1ii11iIi11i
if 22 - 22: i1IIi
def decode ( self , packet ) :
IIiI1I11ii1i = "I"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 88 - 88: OOooOOo
O0oooOO = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 25 - 25: OoO0O00 + o0oOOo0O0Ooo . ooOoO0o - Ii1I . oO0o * Ii1I
O0oooOO = socket . ntohl ( O0oooOO [ 0 ] )
self . security = True if ( O0oooOO & 0x08000000 ) else False
self . ddt = True if ( O0oooOO & 0x04000000 ) else False
self . to_etr = True if ( O0oooOO & 0x02000000 ) else False
self . to_ms = True if ( O0oooOO & 0x01000000 ) else False
packet = packet [ i1II1i1iiI1 : : ]
if ( len ( packet ) < 1 ) : return ( None )
oOOOO0 = struct . unpack ( "B" , packet [ 0 : 1 ] ) [ 0 ]
oOOOO0 = oOOOO0 >> 4
if 97 - 97: I1IiiI
if ( oOOOO0 == 4 ) :
i1II1i1iiI1 = struct . calcsize ( "HHIBBH" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 63 - 63: O0 - OoOoOO00 / i11iIiiIii / OoooooooOO / ooOoO0o / II111iiii
IiiIii1111Ii1I1 , II1Ooo0000o00OO , IiiIii1111Ii1I1 , O00o00oOOo , i111 , iI1I1iII1iII = struct . unpack ( "HHIBBH" , packet [ : i1II1i1iiI1 ] )
self . length = socket . ntohs ( II1Ooo0000o00OO )
self . ttl = O00o00oOOo
self . protocol = i111
self . ip_checksum = socket . ntohs ( iI1I1iII1iII )
self . source . afi = self . dest . afi = LISP_AFI_IPV4
i111 = struct . pack ( "H" , 0 )
I1Ii1iiI1 = struct . calcsize ( "HHIBB" )
OO = struct . calcsize ( "H" )
packet = packet [ : I1Ii1iiI1 ] + i111 + packet [ I1Ii1iiI1 + OO : ]
if 92 - 92: I1Ii111 + OOooOOo - OoO0O00 . o0oOOo0O0Ooo
packet = packet [ i1II1i1iiI1 : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 16 - 16: I1IiiI - ooOoO0o
if 39 - 39: i1IIi % i1IIi / iIii1I11I1II1 % OoooooooOO . ooOoO0o
if ( oOOOO0 == 6 ) :
i1II1i1iiI1 = struct . calcsize ( "IHBB" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 30 - 30: o0oOOo0O0Ooo - Ii1I . i11iIiiIii + oO0o % ooOoO0o + I1ii11iIi11i
IiiIii1111Ii1I1 , II1Ooo0000o00OO , i111 , O00o00oOOo = struct . unpack ( "IHBB" , packet [ : i1II1i1iiI1 ] )
self . length = socket . ntohs ( II1Ooo0000o00OO )
self . protocol = i111
self . ttl = O00o00oOOo
self . source . afi = self . dest . afi = LISP_AFI_IPV6
if 5 - 5: OOooOOo . iII111i . oO0o % IiII * O0
packet = packet [ i1II1i1iiI1 : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 20 - 20: Oo0Ooo . I1IiiI . I1IiiI / OoooooooOO . OoooooooOO + iIii1I11I1II1
if 60 - 60: OoOoOO00 / ooOoO0o % iIii1I11I1II1
self . source . mask_len = self . source . host_mask_len ( )
self . dest . mask_len = self . dest . host_mask_len ( )
if 32 - 32: i11iIiiIii + II111iiii + II111iiii % I11i
i1II1i1iiI1 = struct . calcsize ( "HHHH" )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 96 - 96: o0oOOo0O0Ooo
o00oOOO , i1 , II1Ooo0000o00OO , iI1I1iII1iII = struct . unpack ( "HHHH" , packet [ : i1II1i1iiI1 ] )
self . udp_sport = socket . ntohs ( o00oOOO )
self . udp_dport = socket . ntohs ( i1 )
self . udp_length = socket . ntohs ( II1Ooo0000o00OO )
self . udp_checksum = socket . ntohs ( iI1I1iII1iII )
packet = packet [ i1II1i1iiI1 : : ]
return ( packet )
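#
# Added annotation (assumption from the surrounding code): lisp_rloc_record
# appears to represent one RLOC-record inside an EID-record: unicast and
# multicast priority/weight, the L/P/R flag bits, and the RLOC address, which
# may be wrapped in LCAFs carrying an RLOC-name, geo-coordinates, an ELP, an
# RLE, a JSON blob, or security key material.
#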
class lisp_rloc_record ( ) :
def __init__ ( self ) :
self . priority = 0
self . weight = 0
self . mpriority = 0
self . mweight = 0
self . local_bit = False
self . probe_bit = False
self . reach_bit = False
self . rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . geo = None
self . elp = None
self . rle = None
self . json = None
self . rloc_name = None
self . keys = None
if 34 - 34: O0 * OoO0O00 - oO0o - IiII * Ii1I . II111iiii
if 28 - 28: O0 % iII111i - i1IIi
def print_rloc_name ( self , cour = False ) :
if ( self . rloc_name == None ) : return ( "" )
i1OOO = self . rloc_name
if ( cour ) : i1OOO = lisp_print_cour ( i1OOO )
return ( 'rloc-name: {}' . format ( blue ( i1OOO , cour ) ) )
if 100 - 100: i11iIiiIii % ooOoO0o . ooOoO0o - I1ii11iIi11i % Oo0Ooo - iII111i
if 12 - 12: OoOoOO00 + I11i . OoO0O00 * i11iIiiIii * I11i * I1Ii111
def print_record ( self , indent ) :
ooOOo00o0ooO = self . print_rloc_name ( )
if ( ooOOo00o0ooO != "" ) : ooOOo00o0ooO = ", " + ooOOo00o0ooO
Ii1i11iIi1iII = ""
if ( self . geo ) :
i1i1Ii = ""
if ( self . geo . geo_name ) : i1i1Ii = "'{}' " . format ( self . geo . geo_name )
Ii1i11iIi1iII = ", geo: {}{}" . format ( i1i1Ii , self . geo . print_geo ( ) )
if 64 - 64: II111iiii + i11iIiiIii
iiiII1i11iII = ""
if ( self . elp ) :
i1i1Ii = ""
if ( self . elp . elp_name ) : i1i1Ii = "'{}' " . format ( self . elp . elp_name )
iiiII1i11iII = ", elp: {}{}" . format ( i1i1Ii , self . elp . print_elp ( True ) )
if 13 - 13: Ii1I - Oo0Ooo
oOOoo0O00 = ""
if ( self . rle ) :
i1i1Ii = ""
if ( self . rle . rle_name ) : i1i1Ii = "'{}' " . format ( self . rle . rle_name )
oOOoo0O00 = ", rle: {}{}" . format ( i1i1Ii , self . rle . print_rle ( False ) )
if 30 - 30: i1IIi
Oo00Oo0o000 = ""
if ( self . json ) :
i1i1Ii = ""
if ( self . json . json_name ) :
i1i1Ii = "'{}' " . format ( self . json . json_name )
if 93 - 93: OoOoOO00 - OoooooooOO
Oo00Oo0o000 = ", json: {}" . format ( self . json . print_json ( False ) )
if 92 - 92: OoOoOO00 . i1IIi
if 24 - 24: Oo0Ooo + I11i
I1iii = ""
if ( self . rloc . is_null ( ) == False and self . keys and self . keys [ 1 ] ) :
I1iii = ", " + self . keys [ 1 ] . print_keys ( )
if 79 - 79: I1ii11iIi11i - O0 / IiII
if 1 - 1: I1IiiI
IIIIIiI11Ii = ( "{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}" )
lprint ( IIIIIiI11Ii . format ( indent , self . print_flags ( ) , self . priority ,
self . weight , self . mpriority , self . mweight , self . rloc . afi ,
red ( self . rloc . print_address_no_iid ( ) , False ) , ooOOo00o0ooO , Ii1i11iIi1iII ,
iiiII1i11iII , oOOoo0O00 , Oo00Oo0o000 , I1iii ) )
if 25 - 25: O0 + OOooOOo / iII111i
if 51 - 51: I11i
def print_flags ( self ) :
return ( "{}{}{}" . format ( "L" if self . local_bit else "l" , "P" if self . probe_bit else "p" , "R" if self . reach_bit else "r" ) )
if 54 - 54: i1IIi . O0 . i1IIi . OoO0O00 + I1Ii111 - i11iIiiIii
if 80 - 80: OoOoOO00
if 5 - 5: I1IiiI - I1IiiI / O0 + OOooOOo - i11iIiiIii
def store_rloc_entry ( self , rloc_entry ) :
Oo0o0o0oo = rloc_entry . rloc if ( rloc_entry . translated_rloc . is_null ( ) ) else rloc_entry . translated_rloc
if 19 - 19: ooOoO0o
self . rloc . copy_address ( Oo0o0o0oo )
if 44 - 44: I1Ii111 - i11iIiiIii * I1IiiI
if ( rloc_entry . rloc_name ) :
self . rloc_name = rloc_entry . rloc_name
if 84 - 84: O0 % Ii1I
if 3 - 3: I1IiiI . I11i / I1ii11iIi11i
if ( rloc_entry . geo ) :
self . geo = rloc_entry . geo
else :
i1i1Ii = rloc_entry . geo_name
if ( i1i1Ii and lisp_geo_list . has_key ( i1i1Ii ) ) :
self . geo = lisp_geo_list [ i1i1Ii ]
if 2 - 2: IiII + I11i / iIii1I11I1II1 . i11iIiiIii . i1IIi * ooOoO0o
if 14 - 14: Oo0Ooo . O0 - oO0o - i11iIiiIii
if ( rloc_entry . elp ) :
self . elp = rloc_entry . elp
else :
i1i1Ii = rloc_entry . elp_name
if ( i1i1Ii and lisp_elp_list . has_key ( i1i1Ii ) ) :
self . elp = lisp_elp_list [ i1i1Ii ]
if 8 - 8: I1IiiI / iIii1I11I1II1 / OoooooooOO / Oo0Ooo / ooOoO0o
if 80 - 80: I11i
if ( rloc_entry . rle ) :
self . rle = rloc_entry . rle
else :
i1i1Ii = rloc_entry . rle_name
if ( i1i1Ii and lisp_rle_list . has_key ( i1i1Ii ) ) :
self . rle = lisp_rle_list [ i1i1Ii ]
if 26 - 26: II111iiii + I1IiiI . II111iiii - oO0o % OoO0O00
if 1 - 1: OoO0O00 - II111iiii
if ( rloc_entry . json ) :
self . json = rloc_entry . json
else :
i1i1Ii = rloc_entry . json_name
if ( i1i1Ii and lisp_json_list . has_key ( i1i1Ii ) ) :
self . json = lisp_json_list [ i1i1Ii ]
if 75 - 75: Oo0Ooo - OoOoOO00 + oO0o % i1IIi * OOooOOo
if 56 - 56: OoOoOO00 / OoO0O00 / I1IiiI % OoooooooOO
self . priority = rloc_entry . priority
self . weight = rloc_entry . weight
self . mpriority = rloc_entry . mpriority
self . mweight = rloc_entry . mweight
if 39 - 39: I1IiiI + II111iiii * Oo0Ooo % Ii1I . o0oOOo0O0Ooo * oO0o
if 42 - 42: Ii1I / Oo0Ooo
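#
# Added annotation (assumption from the surrounding code): encode_lcaf()
# appears to wrap the RLOC address and any optional attributes (rloc-name, geo,
# ELP, RLE, security keys, JSON) in an AFI-List LCAF so that they travel
# together in a single RLOC-record.
#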
def encode_lcaf ( self ) :
ooOO0o0ooOo0 = socket . htons ( LISP_AFI_LCAF )
Ii1111I11I = ""
if ( self . geo ) :
Ii1111I11I = self . geo . encode_geo ( )
if 57 - 57: OoO0O00 % IiII % IiII - OoooooooOO % i1IIi
if 92 - 92: I1Ii111 + iIii1I11I1II1 . OoooooooOO + oO0o + I1Ii111
Ooo0oOOO = ""
if ( self . elp ) :
oOOoOOO000o = ""
for IIi1IiIii1 in self . elp . elp_nodes :
o0o0O00oOo = socket . htons ( IIi1IiIii1 . address . afi )
I11I = 0
if ( IIi1IiIii1 . eid ) : I11I |= 0x4
if ( IIi1IiIii1 . probe ) : I11I |= 0x2
if ( IIi1IiIii1 . strict ) : I11I |= 0x1
I11I = socket . htons ( I11I )
oOOoOOO000o += struct . pack ( "HH" , I11I , o0o0O00oOo )
oOOoOOO000o += IIi1IiIii1 . address . pack_address ( )
if 48 - 48: OoooooooOO + i11iIiiIii % O0
if 54 - 54: I1ii11iIi11i + I1ii11iIi11i % iIii1I11I1II1
O00 = socket . htons ( len ( oOOoOOO000o ) )
Ooo0oOOO = struct . pack ( "HBBBBH" , ooOO0o0ooOo0 , 0 , 0 , LISP_LCAF_ELP_TYPE ,
0 , O00 )
Ooo0oOOO += oOOoOOO000o
if 40 - 40: Ii1I % OoO0O00
if 19 - 19: I11i * oO0o * I11i + I1IiiI
Iii1 = ""
if ( self . rle ) :
OoOOOO0oO0Oo = ""
for I1I1iiI in self . rle . rle_nodes :
o0o0O00oOo = socket . htons ( I1I1iiI . address . afi )
OoOOOO0oO0Oo += struct . pack ( "HBBH" , 0 , 0 , I1I1iiI . level , o0o0O00oOo )
OoOOOO0oO0Oo += I1I1iiI . address . pack_address ( )
if ( I1I1iiI . rloc_name ) :
OoOOOO0oO0Oo += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
OoOOOO0oO0Oo += I1I1iiI . rloc_name + "\0"
if 4 - 4: I1IiiI . i11iIiiIii % I11i + II111iiii - Ii1I - O0
I1IiiiIiiiIII = socket . htons ( len ( OoOOOO0oO0Oo ) )
Iii1 = struct . pack ( "HBBBBH" , ooOO0o0ooOo0 , 0 , 0 , LISP_LCAF_RLE_TYPE ,
0 , I1IiiiIiiiIII )
Iii1 += OoOOOO0oO0Oo
if 78 - 78: I1IiiI
if 90 - 90: I1Ii111
I11IIi11Iii1 = ""
if ( self . json ) :
i11iii11 = socket . htons ( len ( self . json . json_string ) + 2 )
I11111i = socket . htons ( len ( self . json . json_string ) )
I11IIi11Iii1 = struct . pack ( "HBBBBHH" , ooOO0o0ooOo0 , 0 , 0 , LISP_LCAF_JSON_TYPE ,
0 , i11iii11 , I11111i )
I11IIi11Iii1 += self . json . json_string
I11IIi11Iii1 += struct . pack ( "H" , 0 )
if 77 - 77: I1Ii111 * I1IiiI
if 27 - 27: O0
iIo0o0Oo0o0oOo = ""
if ( self . rloc . is_null ( ) == False and self . keys and self . keys [ 1 ] ) :
iIo0o0Oo0o0oOo = self . keys [ 1 ] . encode_lcaf ( self . rloc )
if 14 - 14: I1IiiI - i11iIiiIii * I1Ii111 . i11iIiiIii % ooOoO0o
if 53 - 53: O0 . o0oOOo0O0Ooo . II111iiii * OoOoOO00 . OOooOOo
OOOOoOOo0o0OO = ""
if ( self . rloc_name ) :
OOOOoOOo0o0OO += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
OOOOoOOo0o0OO += self . rloc_name + "\0"
if 12 - 12: I1ii11iIi11i . i1IIi / I1IiiI . O0 * I1Ii111 / i1IIi
if 6 - 6: OOooOOo + OoO0O00 + I1IiiI / OoooooooOO
IiiiII1 = len ( Ii1111I11I ) + len ( Ooo0oOOO ) + len ( Iii1 ) + len ( iIo0o0Oo0o0oOo ) + 2 + len ( I11IIi11Iii1 ) + self . rloc . addr_length ( ) + len ( OOOOoOOo0o0OO )
if 94 - 94: I11i - o0oOOo0O0Ooo / I1Ii111
IiiiII1 = socket . htons ( IiiiII1 )
I1I1ii11 = struct . pack ( "HBBBBHH" , ooOO0o0ooOo0 , 0 , 0 , LISP_LCAF_AFI_LIST_TYPE ,
0 , IiiiII1 , socket . htons ( self . rloc . afi ) )
I1I1ii11 += self . rloc . pack_address ( )
return ( I1I1ii11 + OOOOoOOo0o0OO + Ii1111I11I + Ooo0oOOO + Iii1 + iIo0o0Oo0o0oOo + I11IIi11Iii1 )
if 35 - 35: I1Ii111 * Oo0Ooo / o0oOOo0O0Ooo
if 89 - 89: oO0o / OoooooooOO . Ii1I + Oo0Ooo + IiII / OoOoOO00
def encode ( self ) :
I11I = 0
if ( self . local_bit ) : I11I |= 0x0004
if ( self . probe_bit ) : I11I |= 0x0002
if ( self . reach_bit ) : I11I |= 0x0001
if 67 - 67: IiII
oOo = struct . pack ( "BBBBHH" , self . priority , self . weight ,
self . mpriority , self . mweight , socket . htons ( I11I ) ,
socket . htons ( self . rloc . afi ) )
if 66 - 66: i11iIiiIii * iII111i
if ( self . geo or self . elp or self . rle or self . keys or self . rloc_name or self . json ) :
if 51 - 51: OoooooooOO + I11i . iII111i + i11iIiiIii * iII111i - OoO0O00
oOo = oOo [ 0 : - 2 ] + self . encode_lcaf ( )
else :
oOo += self . rloc . pack_address ( )
if 60 - 60: iII111i * iIii1I11I1II1 . OoOoOO00 . o0oOOo0O0Ooo / iIii1I11I1II1
return ( oOo )
if 36 - 36: i1IIi . OoooooooOO - II111iiii - OoOoOO00 - IiII
if 53 - 53: I1ii11iIi11i - II111iiii . i11iIiiIii
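#
# Added annotation (assumption from the surrounding code): decode_lcaf()
# appears to walk one LCAF-encoded RLOC, dispatching on the LCAF type
# (AFI-List, Geo-Coord, JSON, ELP, RLE, Security). For the Security type it
# also appears to decide whether to keep the stored encap keys, rekey with the
# peer's new public key, or discard keying state for this RLOC.
#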
def decode_lcaf ( self , packet , nonce ) :
IIiI1I11ii1i = "HBBBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 76 - 76: iIii1I11I1II1 - Oo0Ooo
o0o0O00oOo , OoO0oOoo , I11I , o0O00o0o , ii11iIII111 , i11iii11 = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 79 - 79: I1IiiI * IiII . OoooooooOO % I1Ii111 * I1Ii111
if 17 - 17: I1Ii111 - I1Ii111 . oO0o / I1Ii111
i11iii11 = socket . ntohs ( i11iii11 )
packet = packet [ i1II1i1iiI1 : : ]
if ( i11iii11 > len ( packet ) ) : return ( None )
if ( o0O00o0o == LISP_LCAF_AFI_LIST_TYPE ) :
while ( i11iii11 > 0 ) :
IIiI1I11ii1i = "H"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( i11iii11 < i1II1i1iiI1 ) : return ( None )
if 56 - 56: i1IIi . ooOoO0o + I11i - i11iIiiIii
IIi1IiiIi1III = len ( packet )
o0o0O00oOo = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
o0o0O00oOo = socket . ntohs ( o0o0O00oOo )
if 100 - 100: iIii1I11I1II1 - i1IIi . OOooOOo
if ( o0o0O00oOo == LISP_AFI_LCAF ) :
packet = self . decode_lcaf ( packet , nonce )
if ( packet == None ) : return ( None )
else :
packet = packet [ i1II1i1iiI1 : : ]
self . rloc_name = None
if ( o0o0O00oOo == LISP_AFI_NAME ) :
packet , i1OOO = lisp_decode_dist_name ( packet )
self . rloc_name = i1OOO
else :
self . rloc . afi = o0o0O00oOo
packet = self . rloc . unpack_address ( packet )
if ( packet == None ) : return ( None )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if 73 - 73: I1Ii111 / I11i / i11iIiiIii - I1ii11iIi11i % ooOoO0o
if 92 - 92: I1IiiI - o0oOOo0O0Ooo % I1ii11iIi11i / iII111i % oO0o
if 43 - 43: Oo0Ooo % oO0o . i11iIiiIii - O0
i11iii11 -= IIi1IiiIi1III - len ( packet )
if 5 - 5: i1IIi + Ii1I
if 38 - 38: I1IiiI . O0 + OOooOOo / I1ii11iIi11i . iIii1I11I1II1 - i1IIi
elif ( o0O00o0o == LISP_LCAF_GEO_COORD_TYPE ) :
O0OOoo = lisp_geo ( "" )
packet = O0OOoo . decode_geo ( packet , i11iii11 , ii11iIII111 )
if ( packet == None ) : return ( None )
self . geo = O0OOoo
if 46 - 46: i1IIi % iIii1I11I1II1
elif ( o0O00o0o == LISP_LCAF_JSON_TYPE ) :
IIiI1I11ii1i = "H"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( i11iii11 < i1II1i1iiI1 ) : return ( None )
if 48 - 48: i1IIi . Oo0Ooo . i1IIi . I1ii11iIi11i * I1IiiI - Ii1I
I11111i = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
I11111i = socket . ntohs ( I11111i )
if ( i11iii11 < i1II1i1iiI1 + I11111i ) : return ( None )
if 83 - 83: OoooooooOO
packet = packet [ i1II1i1iiI1 : : ]
self . json = lisp_json ( "" , packet [ 0 : I11111i ] )
packet = packet [ I11111i : : ]
if 42 - 42: I1ii11iIi11i . i1IIi - OoOoOO00 - oO0o + i11iIiiIii
elif ( o0O00o0o == LISP_LCAF_ELP_TYPE ) :
O0Oooo0 = lisp_elp ( None )
O0Oooo0 . elp_nodes = [ ]
while ( i11iii11 > 0 ) :
I11I , o0o0O00oOo = struct . unpack ( "HH" , packet [ : 4 ] )
if 84 - 84: iIii1I11I1II1 % Ii1I / OoooooooOO
o0o0O00oOo = socket . ntohs ( o0o0O00oOo )
if ( o0o0O00oOo == LISP_AFI_LCAF ) : return ( None )
if 62 - 62: OOooOOo * OoO0O00 * OoO0O00 + OoooooooOO . IiII + OoO0O00
IIi1IiIii1 = lisp_elp_node ( )
O0Oooo0 . elp_nodes . append ( IIi1IiIii1 )
if 13 - 13: O0 . I1IiiI % OoO0O00 - I11i . O0
I11I = socket . ntohs ( I11I )
IIi1IiIii1 . eid = ( I11I & 0x4 )
IIi1IiIii1 . probe = ( I11I & 0x2 )
IIi1IiIii1 . strict = ( I11I & 0x1 )
IIi1IiIii1 . address . afi = o0o0O00oOo
IIi1IiIii1 . address . mask_len = IIi1IiIii1 . address . host_mask_len ( )
packet = IIi1IiIii1 . address . unpack_address ( packet [ 4 : : ] )
i11iii11 -= IIi1IiIii1 . address . addr_length ( ) + 4
if 14 - 14: iIii1I11I1II1
O0Oooo0 . select_elp_node ( )
self . elp = O0Oooo0
if 48 - 48: i11iIiiIii * OoOoOO00 - I1IiiI + iIii1I11I1II1
elif ( o0O00o0o == LISP_LCAF_RLE_TYPE ) :
II1IIiiI1 = lisp_rle ( None )
II1IIiiI1 . rle_nodes = [ ]
while ( i11iii11 > 0 ) :
IiiIii1111Ii1I1 , iIIIIi , IiIi1II1Ii , o0o0O00oOo = struct . unpack ( "HBBH" , packet [ : 6 ] )
if 53 - 53: O0
o0o0O00oOo = socket . ntohs ( o0o0O00oOo )
if ( o0o0O00oOo == LISP_AFI_LCAF ) : return ( None )
if 28 - 28: iII111i % OoO0O00 . OoO0O00 / IiII * Oo0Ooo * iII111i
I1I1iiI = lisp_rle_node ( )
II1IIiiI1 . rle_nodes . append ( I1I1iiI )
if 49 - 49: I1IiiI / I1Ii111 * iII111i + I1IiiI % oO0o % ooOoO0o
I1I1iiI . level = IiIi1II1Ii
I1I1iiI . address . afi = o0o0O00oOo
I1I1iiI . address . mask_len = I1I1iiI . address . host_mask_len ( )
packet = I1I1iiI . address . unpack_address ( packet [ 6 : : ] )
if 27 - 27: OoO0O00 / iII111i . I1ii11iIi11i
i11iii11 -= I1I1iiI . address . addr_length ( ) + 6
if ( i11iii11 >= 2 ) :
o0o0O00oOo = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
if ( socket . ntohs ( o0o0O00oOo ) == LISP_AFI_NAME ) :
packet = packet [ 2 : : ]
packet , I1I1iiI . rloc_name = lisp_decode_dist_name ( packet )
if 71 - 71: OoO0O00 . i11iIiiIii . iIii1I11I1II1 + I1IiiI - o0oOOo0O0Ooo
if ( packet == None ) : return ( None )
i11iii11 -= len ( I1I1iiI . rloc_name ) + 1 + 2
if 34 - 34: iII111i
if 6 - 6: OoO0O00 . OoOoOO00 + I1ii11iIi11i
if 24 - 24: OoO0O00 . Ii1I
self . rle = II1IIiiI1
self . rle . build_forwarding_list ( )
if 26 - 26: O0 * I1IiiI - OOooOOo * OoooooooOO * II111iiii % OoOoOO00
elif ( o0O00o0o == LISP_LCAF_SECURITY_TYPE ) :
oOO = packet
i1II1Ii = lisp_keys ( 1 )
packet = i1II1Ii . decode_lcaf ( oOO , i11iii11 )
if ( packet == None ) : return ( None )
iii1IiI = [ LISP_CS_25519_CBC , LISP_CS_25519_CHACHA ]
if ( i1II1Ii . cipher_suite in iii1IiI ) :
if ( i1II1Ii . cipher_suite == LISP_CS_25519_CBC ) :
Iiii11 = lisp_keys ( 1 , do_poly = False , do_chacha = False )
if 88 - 88: ooOoO0o . II111iiii * O0 % IiII
if ( i1II1Ii . cipher_suite == LISP_CS_25519_CHACHA ) :
Iiii11 = lisp_keys ( 1 , do_poly = True , do_chacha = True )
if 15 - 15: O0 % i1IIi - OOooOOo . IiII
else :
Iiii11 = lisp_keys ( 1 , do_poly = False , do_chacha = False )
if 1 - 1: I1IiiI
packet = Iiii11 . decode_lcaf ( oOO , i11iii11 )
if ( packet == None ) : return ( None )
if 40 - 40: o0oOOo0O0Ooo % I11i % O0
if ( len ( packet ) < 2 ) : return ( None )
o0o0O00oOo = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
self . rloc . afi = socket . ntohs ( o0o0O00oOo )
if ( len ( packet ) < self . rloc . addr_length ( ) ) : return ( None )
packet = self . rloc . unpack_address ( packet [ 2 : : ] )
if ( packet == None ) : return ( None )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if ( self . rloc . is_null ( ) ) : return ( packet )
if 46 - 46: I1Ii111 . i11iIiiIii
OOO0Oo0Oo = self . rloc_name
if ( OOO0Oo0Oo ) : OOO0Oo0Oo = blue ( self . rloc_name , False )
iiiiI = self . keys [ 1 ] if self . keys else None
if ( iiiiI == None ) :
if ( Iiii11 . remote_public_key == None ) :
O0I11IIIII = bold ( "No remote encap-public-key supplied" , False )
lprint ( " {} for {}" . format ( O0I11IIIII , OOO0Oo0Oo ) )
Iiii11 = None
else :
O0I11IIIII = bold ( "New encap-keying with new state" , False )
lprint ( " {} for {}" . format ( O0I11IIIII , OOO0Oo0Oo ) )
Iiii11 . compute_shared_key ( "encap" )
if ( iiiiI ) :
if ( Iiii11 . remote_public_key == None ) :
Iiii11 = None
IIiI = bold ( "Remote encap-unkeying occurred" , False )
lprint ( " {} for {}" . format ( IIiI , OOO0Oo0Oo ) )
elif ( iiiiI . compare_keys ( Iiii11 ) ) :
Iiii11 = iiiiI
lprint ( " Maintain stored encap-keys for {}" . format ( OOO0Oo0Oo ) )
if 29 - 29: OOooOOo
else :
if ( iiiiI . remote_public_key == None ) :
O0I11IIIII = "New encap-keying for existing state"
else :
O0I11IIIII = "Remote encap-rekeying"
if 69 - 69: oO0o % OoooooooOO * iII111i
lprint ( " {} for {}" . format ( bold ( O0I11IIIII , False ) ,
OOO0Oo0Oo ) )
iiiiI . remote_public_key = Iiii11 . remote_public_key
iiiiI . compute_shared_key ( "encap" )
Iiii11 = iiiiI
if 58 - 58: oO0o / i11iIiiIii . OoOoOO00 % O0 / iIii1I11I1II1
if 50 - 50: I1Ii111 . I11i / O0 . I11i
self . keys = [ None , Iiii11 , None , None ]
if 91 - 91: i11iIiiIii . I1ii11iIi11i + I11i
else :
packet = packet [ i11iii11 : : ]
if 91 - 91: iIii1I11I1II1 + iII111i . I11i % i11iIiiIii - i11iIiiIii + I1IiiI
return ( packet )
if 75 - 75: I1ii11iIi11i / I1IiiI - iIii1I11I1II1 / OoO0O00 * OOooOOo
if 73 - 73: OoooooooOO % IiII / I1Ii111 * I11i + i1IIi % i11iIiiIii
def decode ( self , packet , nonce ) :
IIiI1I11ii1i = "BBBBHH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 91 - 91: i11iIiiIii
self . priority , self . weight , self . mpriority , self . mweight , I11I , o0o0O00oOo = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 6 - 6: O0 - iIii1I11I1II1 + I1Ii111 . o0oOOo0O0Ooo * i11iIiiIii
if 53 - 53: OOooOOo / I1IiiI / oO0o * OOooOOo / i1IIi - I1Ii111
I11I = socket . ntohs ( I11I )
o0o0O00oOo = socket . ntohs ( o0o0O00oOo )
self . local_bit = True if ( I11I & 0x0004 ) else False
self . probe_bit = True if ( I11I & 0x0002 ) else False
self . reach_bit = True if ( I11I & 0x0001 ) else False
if 71 - 71: O0 + Oo0Ooo % oO0o - o0oOOo0O0Ooo
if ( o0o0O00oOo == LISP_AFI_LCAF ) :
packet = packet [ i1II1i1iiI1 - 2 : : ]
packet = self . decode_lcaf ( packet , nonce )
else :
self . rloc . afi = o0o0O00oOo
packet = packet [ i1II1i1iiI1 : : ]
packet = self . rloc . unpack_address ( packet )
if 82 - 82: iIii1I11I1II1
self . rloc . mask_len = self . rloc . host_mask_len ( )
return ( packet )
if 64 - 64: ooOoO0o + I1IiiI % OOooOOo + II111iiii
if 46 - 46: I1IiiI
def end_of_rlocs ( self , packet , rloc_count ) :
for II11iIII1i1I in range ( rloc_count ) :
packet = self . decode ( packet , None )
if ( packet == None ) : return ( None )
if 72 - 72: iII111i
return ( packet )
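#
# Added annotation (assumption from the surrounding code): lisp_map_referral
# appears to cover only the fixed Map-Referral header, the record count and the
# 64-bit nonce; the referral EID-records themselves are handled by
# lisp_eid_record above.
#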
class lisp_map_referral ( ) :
def __init__ ( self ) :
self . record_count = 0
self . nonce = 0
if 27 - 27: i11iIiiIii % II111iiii + I1Ii111
if 76 - 76: OOooOOo - I1Ii111 + iIii1I11I1II1 + I1IiiI * oO0o
def print_map_referral ( self ) :
lprint ( "{} -> record-count: {}, nonce: 0x{}" . format ( bold ( "Map-Referral" , False ) , self . record_count ,
lisp_hex_string ( self . nonce ) ) )
if 70 - 70: ooOoO0o
if 3 - 3: I1IiiI - I1IiiI
def encode ( self ) :
O0oooOO = ( LISP_MAP_REFERRAL << 28 ) | self . record_count
oOo = struct . pack ( "I" , socket . htonl ( O0oooOO ) )
oOo += struct . pack ( "Q" , self . nonce )
return ( oOo )
if 89 - 89: OoOoOO00
if 27 - 27: i1IIi % OoOoOO00 / Ii1I * Ii1I / I11i
def decode ( self , packet ) :
IIiI1I11ii1i = "I"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 11 - 11: OOooOOo
O0oooOO = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
O0oooOO = socket . ntohl ( O0oooOO [ 0 ] )
self . record_count = O0oooOO & 0xff
packet = packet [ i1II1i1iiI1 : : ]
if 58 - 58: OoO0O00 * OoooooooOO
IIiI1I11ii1i = "Q"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 47 - 47: iII111i - Oo0Ooo
self . nonce = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
return ( packet )
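#
# Added annotation (assumption from the surrounding code): lisp_ddt_entry
# appears to be one entry of the DDT referral cache: an (EID, group) prefix
# with its delegation set of lisp_ddt_node entries and, for (S,G) state, a
# per-source sub-cache reached through add_source_entry()/lookup_source_cache().
#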
class lisp_ddt_entry ( ) :
def __init__ ( self ) :
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . uptime = lisp_get_timestamp ( )
self . delegation_set = [ ]
self . source_cache = None
self . map_referrals_sent = 0
if 62 - 62: iII111i . I11i * i1IIi + iII111i
if 95 - 95: Ii1I / o0oOOo0O0Ooo % ooOoO0o - I1IiiI / OOooOOo * OOooOOo
def is_auth_prefix ( self ) :
if ( len ( self . delegation_set ) != 0 ) : return ( False )
if ( self . is_star_g ( ) ) : return ( False )
return ( True )
if 6 - 6: OoO0O00 % IiII + iIii1I11I1II1
if 18 - 18: II111iiii . Ii1I + OoOoOO00 + O0 - I11i
def is_ms_peer_entry ( self ) :
if ( len ( self . delegation_set ) == 0 ) : return ( False )
return ( self . delegation_set [ 0 ] . is_ms_peer ( ) )
if 30 - 30: II111iiii
if 26 - 26: I11i - i1IIi - Oo0Ooo * O0 * OOooOOo . OoooooooOO
def print_referral_type ( self ) :
if ( len ( self . delegation_set ) == 0 ) : return ( "unknown" )
oOoI1I = self . delegation_set [ 0 ]
return ( oOoI1I . print_node_type ( ) )
if 13 - 13: iII111i * i1IIi * iIii1I11I1II1 . OOooOOo + O0 . o0oOOo0O0Ooo
if 23 - 23: I1ii11iIi11i . I1ii11iIi11i / I1IiiI . i1IIi
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 47 - 47: i11iIiiIii . o0oOOo0O0Ooo . i11iIiiIii + I1IiiI - I1ii11iIi11i
if 62 - 62: OoooooooOO + I1IiiI / ooOoO0o . Ii1I . Oo0Ooo
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_ddt_cache . add_cache ( self . eid , self )
else :
oO00oOo = lisp_ddt_cache . lookup_cache ( self . group , True )
if ( oO00oOo == None ) :
oO00oOo = lisp_ddt_entry ( )
oO00oOo . eid . copy_address ( self . group )
oO00oOo . group . copy_address ( self . group )
lisp_ddt_cache . add_cache ( self . group , oO00oOo )
if 60 - 60: Oo0Ooo + Ii1I / oO0o / I11i
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( oO00oOo . group )
oO00oOo . add_source_entry ( self )
if 21 - 21: OOooOOo % O0 / I11i
if 15 - 15: O0 - i1IIi . iIii1I11I1II1 - i11iIiiIii / Ii1I
if 11 - 11: iIii1I11I1II1 + I1IiiI
def add_source_entry ( self , source_ddt ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_ddt . eid , source_ddt )
if 15 - 15: o0oOOo0O0Ooo
if 55 - 55: i11iIiiIii / OoooooooOO - I11i
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 89 - 89: I11i - i1IIi - i1IIi * OOooOOo - O0
if 94 - 94: Oo0Ooo / I11i . I1ii11iIi11i
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
if 31 - 31: i11iIiiIii + iIii1I11I1II1 . II111iiii
if 72 - 72: I1Ii111 * OoO0O00 + Oo0Ooo / Ii1I % OOooOOo
if 84 - 84: OoOoOO00 / o0oOOo0O0Ooo
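#
# Added annotation (assumption from the surrounding code): lisp_ddt_node
# appears to describe one delegated referral target: its address, public key,
# priority/weight, and whether it acts as a DDT child, a map-server child, or a
# map-server peer.
#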
class lisp_ddt_node ( ) :
def __init__ ( self ) :
self . delegate_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . public_key = ""
self . map_server_peer = False
self . map_server_child = False
self . priority = 0
self . weight = 0
if 9 - 9: Ii1I
if 76 - 76: I1IiiI % Oo0Ooo / iIii1I11I1II1 - Oo0Ooo
def print_node_type ( self ) :
if ( self . is_ddt_child ( ) ) : return ( "ddt-child" )
if ( self . is_ms_child ( ) ) : return ( "map-server-child" )
if ( self . is_ms_peer ( ) ) : return ( "map-server-peer" )
if 34 - 34: OoOoOO00 - i1IIi + OOooOOo + Ii1I . o0oOOo0O0Ooo
if 42 - 42: OoO0O00
def is_ddt_child ( self ) :
if ( self . map_server_child ) : return ( False )
if ( self . map_server_peer ) : return ( False )
return ( True )
if 59 - 59: OoO0O00 . I1Ii111 % OoO0O00
if 22 - 22: Oo0Ooo
def is_ms_child ( self ) :
return ( self . map_server_child )
if 21 - 21: o0oOOo0O0Ooo
if 86 - 86: ooOoO0o / iIii1I11I1II1 . OOooOOo
def is_ms_peer ( self ) :
return ( self . map_server_peer )
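#
# Added annotation (assumption from the surrounding code): lisp_ddt_map_request
# appears to hold a Map-Request that is queued while it walks the DDT
# hierarchy; queue_map_request() arms a retransmit timer
# (LISP_DDT_MAP_REQUEST_INTERVAL) and indexes the entry by nonce in
# lisp_ddt_map_requestQ, and dequeue_map_request() undoes both.
#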
class lisp_ddt_map_request ( ) :
def __init__ ( self , lisp_sockets , packet , eid , group , nonce ) :
self . uptime = lisp_get_timestamp ( )
self . lisp_sockets = lisp_sockets
self . packet = packet
self . eid = eid
self . group = group
self . nonce = nonce
self . mr_source = None
self . sport = 0
self . itr = None
self . retry_count = 0
self . send_count = 0
self . retransmit_timer = None
self . last_request_sent_to = None
self . from_pitr = False
self . tried_root = False
self . last_cached_prefix = [ None , None ]
if 49 - 49: I1IiiI
if 23 - 23: I1Ii111
def print_ddt_map_request ( self ) :
lprint ( "Queued Map-Request from {}ITR {}->{}, nonce 0x{}" . format ( "P" if self . from_pitr else "" ,
red ( self . itr . print_address ( ) , False ) ,
green ( self . eid . print_address ( ) , False ) , self . nonce ) )
if 69 - 69: OoOoOO00 . OoooooooOO . o0oOOo0O0Ooo + i11iIiiIii
if 54 - 54: ooOoO0o - O0 + iII111i
def queue_map_request ( self ) :
self . retransmit_timer = threading . Timer ( LISP_DDT_MAP_REQUEST_INTERVAL ,
lisp_retransmit_ddt_map_request , [ self ] )
self . retransmit_timer . start ( )
lisp_ddt_map_requestQ [ str ( self . nonce ) ] = self
if 34 - 34: Ii1I - OOooOOo % iII111i
if 48 - 48: oO0o - O0
def dequeue_map_request ( self ) :
self . retransmit_timer . cancel ( )
if ( lisp_ddt_map_requestQ . has_key ( str ( self . nonce ) ) ) :
lisp_ddt_map_requestQ . pop ( str ( self . nonce ) )
if 17 - 17: iIii1I11I1II1 . IiII / ooOoO0o % I11i + o0oOOo0O0Ooo - iIii1I11I1II1
if 95 - 95: OoOoOO00 + OOooOOo - I11i * i1IIi + i1IIi * O0
if 60 - 60: Oo0Ooo + I11i % iIii1I11I1II1 % oO0o - I1Ii111 / o0oOOo0O0Ooo
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
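#
# Added annotation (assumption from the surrounding code): the constants below
# appear to enumerate DDT Map-Referral action codes. The two negative values
# are internal sentinels; lisp_map_referral_action_string maps the non-negative
# codes to the strings printed by lisp_eid_record.print_record() when ddt is
# True.
#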
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH

lisp_map_referral_action_string = [
    "node-referral", "ms-referral", "ms-ack", "ms-not-registered",
    "delegation-hole", "not-authoritative"]

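#
# The lisp_info class below encodes and decodes LISP Info-Request and
# Info-Reply messages, which this implementation uses for NAT traversal:
# an ETR learns its translated global RLOC, the translated port, and the
# list of RTRs it should use.
#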
class lisp_info():
    def __init__(self):
        self.info_reply = False
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []
        self.hostname = lisp_hostname

    def print_info(self):
        if (self.info_reply):
            msg_type = "Info-Reply"
            info = (", ms-port: {}, etr-port: {}, global-rloc: {}, " +
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format(
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))
            if (len(self.rtr_list) == 0): info += "empty, "
            for rtr in self.rtr_list:
                info += red(rtr.print_address_no_iid(), False) + ", "
            info = info[0:-2]
        else:
            msg_type = "Info-Request"
            hostname = "<none>" if self.hostname == None else self.hostname
            info = ", hostname: {}".format(blue(hostname, False))

        lprint("{} -> nonce: 0x{}{}".format(bold(msg_type, False),
            lisp_hex_string(self.nonce), info))

    def encode(self):
        first_long = (LISP_NAT_INFO << 28)
        if (self.info_reply): first_long |= (1 << 27)

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        packet += struct.pack("III", 0, 0, 0)

        #
        # An Info-Request carries only an optional hostname, encoded as an
        # AFI distinguished-name.
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                packet += struct.pack("H", 0)
            else:
                packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
                packet += self.hostname + "\0"
            return(packet)

        #
        # An Info-Reply carries a NAT-traversal LCAF with the MS and ETR
        # ports, the global and private ETR RLOCs, and the RTR list.
        #
        afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_len = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
            ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
        packet += self.global_etr_rloc.pack_address()
        packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
        packet += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)

        for rtr in self.rtr_list:
            packet += struct.pack("H", socket.htons(rtr.afi))
            packet += rtr.pack_address()
        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])

        first_long = socket.ntohl(first_long)
        self.nonce = nonce[0]
        self.info_reply = first_long & 0x08000000
        self.hostname = None
        packet = packet[format_size::]

        #
        # Parse the key-id and authentication-length fields, which must be
        # zero, then the TTL, reserved byte, mask-length, and EID-prefix AFI.
        #
        packet_format = "HH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
        if (auth_len != 0): return(None)

        packet = packet[format_size::]
        packet_format = "IBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        record_ttl, rsvd, mask_len, eid_afi = struct.unpack(packet_format,
            packet[:format_size])

        if (eid_afi != 0): return(None)
        packet = packet[format_size::]

        #
        # An Info-Request ends with an optional hostname encoded as an AFI
        # distinguished-name.
        #
        if (self.info_reply == False):
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (len(packet) >= format_size):
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[format_size::]
                    packet, self.hostname = lisp_decode_dist_name(packet)
            return(orig_packet)

        #
        # An Info-Reply carries a NAT-traversal LCAF.  Parse the ports first.
        #
        packet_format = "HHBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd1, lcaf_type, rsvd2, lcaf_len, ms_port, etr_port = \
            struct.unpack(packet_format, packet[:format_size])

        if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)

        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[format_size::]

        #
        # Global ETR RLOC address.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return(None)
            self.global_etr_rloc.mask_len = \
                self.global_etr_rloc.host_mask_len()

        #
        # Global MS RLOC address.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()

        #
        # Private ETR RLOC address.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.private_etr_rloc.mask_len = \
                self.private_etr_rloc.host_mask_len()

        #
        # Any remaining AFI-encoded addresses make up the RTR list.
        #
        while (len(packet) >= format_size):
            afi = struct.unpack(packet_format, packet[:format_size])[0]
            packet = packet[format_size::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return(orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)

        return(orig_packet)

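#
# lisp_nat_info and lisp_info_source cache what was learned from Info
# messages: the former records a translated address/port for an ETR behind
# a NAT, the latter indexes Info-Request senders by address+hostname and by
# nonce.
#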
class lisp_nat_info():
    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        elapsed = time.time() - self.uptime
        return(elapsed >= (LISP_INFO_INTERVAL * 2))

class lisp_info_source():
    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self

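#
# lisp_concat_auth_data() rebuilds an authentication hash as one hex string
# from the 64-bit (and 32-bit) words that struct.unpack() returned.  It
# appears to be used when validating Map-Register/Map-Notify authentication
# data elsewhere in this module.
#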
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):

    #
    # On little-endian x86, byte-swap each word back to network order before
    # building the hex string.
    #
    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
            else: auth3 = byte_swap_64(auth3)
        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    #
    # SHA-1 authentication data is two 64-bit words plus one 32-bit word.
    #
    if (alg_id == LISP_SHA_1_96_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(8)
        auth_data = auth1 + auth2 + auth3

    #
    # SHA-256 authentication data is four 64-bit words.
    #
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(16)
        auth4 = lisp_hex_string(auth4)
        auth4 = auth4.zfill(16)
        auth_data = auth1 + auth2 + auth3 + auth4

    return(auth_data)

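#
# Socket helpers.  lisp_open_listen_socket() binds a UDP socket when "port"
# is numeric; otherwise it treats "port" as the pathname of an AF_UNIX
# datagram socket used for IPC between the lisp processes (for example
# "lisp-etr" and "lisp-rtr", which appear below).
#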
def lisp_open_listen_socket(local_addr, port):
    if (port.isdigit()):
        if (local_addr.find(".") != -1):
            listen_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return(None)
            listen_socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        listen_socket.bind((local_addr, int(port)))
    else:
        socket_name = port
        if (os.path.exists(socket_name)):
            os.system("rm " + socket_name)
            time.sleep(1)
        listen_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        listen_socket.bind(socket_name)
    return(listen_socket)

def lisp_open_send_socket(internal_name, afi):
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return(None)
            send_socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        if (os.path.exists(internal_name)): os.system("rm " + internal_name)
        send_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        send_socket.bind(internal_name)
    return(send_socket)

def lisp_close_socket(sock, internal_name):
    sock.close()
    if (os.path.exists(internal_name)): os.system("rm " + internal_name)
    return

def lisp_is_running(node):
    return(True if (os.path.exists(node)) else False)

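#
# IPC message formats.  The helpers below build strings of the form
# "<type>@<length>@<source>@<port>@<payload>" that are exchanged over the
# AF_UNIX sockets; lisp_receive() below splits on "@" to recover the fields.
# For example, lisp_packet_ipc(p, "lisp-itr", 4342) produces
# "packet@<len(p)>@lisp-itr@4342@<p>" (the source name and port here are
# just illustrative values).
#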
def lisp_packet_ipc(packet, source, sport):
    return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) +
        "@" + packet))

def lisp_control_packet_ipc(packet, source, dest, dport):
    return("control-packet@" + dest + "@" + str(dport) + "@" + packet)

def lisp_data_packet_ipc(packet, source):
    return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)

def lisp_command_ipc(packet, source):
    return("command@" + str(len(packet)) + "@" + source + "@@" + packet)

def lisp_api_ipc(source, data):
    return("api@" + str(len(data)) + "@" + source + "@@" + data)

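#
# lisp_ipc() sends an IPC message to another lisp process, segmenting it to
# 1500 bytes (9000 for "control-packet" messages) and retrying with
# exponential backoff when sendto() raises a socket error.
#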
def lisp_ipc(packet, send_socket, node):

    #
    # Don't send if the destination process is not running.
    #
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return

    #
    # Control messages can be sent in 9000-byte segments, everything else in
    # 1500-byte segments.
    #
    segment_size = 1500 if (packet.find("control-packet") == -1) else 9000

    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, segment_size)
        segment = packet[offset:segment_len + offset]

        try:
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(
                len(segment), len(packet), node))
            retry_count = 0
            sleep_time = .001

        except socket.error, e:
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break

            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(
                len(segment), len(packet), node, e))

            retry_count += 1
            time.sleep(sleep_time)

            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue

        offset += segment_len
        length -= segment_len

    return

def lisp_format_packet(packet):
    packet = binascii.hexlify(packet)
    offset = 0
    formatted = ""
    length = len(packet) * 2
    while (offset < length):
        formatted += packet[offset:offset + 8] + " "
        offset += 8
        length -= 4
    return(formatted)

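#
# lisp_send() transmits a control packet on the IPv4 or IPv6 control socket,
# handling IPv4-mapped IPv6 destinations and temporarily raising the IP TTL
# for RLOC-probe messages.
#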
def lisp_send(lisp_sockets, dest, port, packet):
    lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    #
    # For an IPv4-mapped IPv6 destination, fall back to the IPv4 socket and
    # strip the "::ffff:" prefix when no IPv6 socket is available (an RTR
    # always uses the IPv4 socket in this case).
    #
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
        if (lisp_socket == None):
            lisp_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))

    #
    # Raise the IP TTL to 255 for RLOC-probe Map-Requests (0x12) and
    # Map-Replies (0x28), and restore it to 64 after sending.
    #
    probe_ttl = (LISP_RLOC_PROBE_TTL == 255)
    if (probe_ttl):
        lisp_type = struct.unpack("B", packet[0])[0]
        probe_ttl = (lisp_type in [0x12, 0x28])
        if (probe_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)

    try: lisp_socket.sendto(packet, (address, port))
    except socket.error, e:
        lprint("socket.sendto() failed: {}".format(e))

    if (probe_ttl): lisp_set_ttl(lisp_socket, 64)
    return

def lisp_receive_segments(lisp_socket, packet, source, total_length):

    #
    # If the first segment held the entire message, we are done.
    #
    segment_len = total_length - len(packet)
    if (segment_len == 0): return([True, packet])

    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))

    #
    # Keep reading segments until the entire message has been received.
    #
    length = segment_len
    while (length > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return([False, None])

        segment = segment[0]

        #
        # A new message header means the sender gave up on the old message;
        # discard what we have and hand the new message back to the caller.
        #
        if (segment.find("packet@") == 0):
            fields = segment.split("@")
            lprint(("Received new message ({}-out-of-{}) while receiving " +
                "fragments, old message discarded"), len(segment),
                fields[1] if len(fields) > 2 else "?")
            return([False, segment])

        length -= len(segment)
        packet += segment

        lprint("Received {}-out-of-{} byte segment from {}".format(
            len(segment), total_length, source))

    return([True, packet])

def lisp_bit_stuff(payload):
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    packet = ""
    for segment in payload: packet += segment + "\x40"
    return(packet[:-1])

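#
# lisp_receive() is the common receive loop.  For network sockets it returns
# ["packet", source, port, packet].  For internal AF_UNIX sockets it
# reassembles the "<type>@<length>@<source>@<port>@<payload>" IPC messages,
# reading more segments via lisp_receive_segments() when needed, and returns
# [type, source, port, payload].
#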
def lisp_receive(lisp_socket, internal):
    while (True):

        #
        # Block until a packet or IPC message arrives.
        #
        try: data_and_addr = lisp_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        #
        # Packets from the network are returned as-is along with their
        # source address and port.
        #
        if (internal == False):
            packet = data_and_addr[0]
            source = lisp_convert_6to4(data_and_addr[1][0])
            port = data_and_addr[1][1]

            if (port == LISP_DATA_PORT):
                do_log = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                do_log = True
                packet_str = lisp_format_packet(packet)

            if (do_log):
                lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
                    False), len(packet), bold("from " + source, False), port,
                    packet_str))

            return(["packet", source, port, packet])

        #
        # Internal IPC messages have the form
        # "<type>@<length>@<source>@<port>@<payload>" and may arrive in
        # multiple segments.
        #
        done = False
        message = data_and_addr[0]
        discard = False

        while (done == False):
            message = message.split("@")

            if (len(message) < 4):
                lprint("Possible fragment (length {}), from old message, " +
                    "discarding", len(message[0]))
                discard = True
                break

            ipc_type = message[0]
            try:
                total_length = int(message[1])
            except:
                error_str = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error_str, data_and_addr))
                discard = True
                break

            source = message[2]
            port = message[3]

            #
            # If the payload itself contained "@" characters, the split
            # created extra fields; bit-stuff them back together.
            #
            if (len(message) > 5):
                packet = lisp_bit_stuff(message[4::])
            else:
                packet = message[4]

            #
            # Read any remaining segments of this message.
            #
            done, packet = lisp_receive_segments(lisp_socket, packet, source,
                total_length)
            if (packet == None): return(["", "", "", ""])

            #
            # A new message arrived mid-reassembly; start over with it.
            #
            if (done == False):
                message = packet
                continue

            if (port == ""): port = "no-port"
            if (ipc_type == "command" and lisp_i_am_core == False):
                index = packet.find(" {")
                command_str = packet if index == -1 else packet[:index]
                command_str = ": '" + command_str + "'"
            else:
                command_str = ""

            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, ipc_type,
                command_str if (ipc_type in ["command", "api"]) else ": ... "
                if (ipc_type == "data-packet") else
                ": " + lisp_format_packet(packet)))

        if (discard): continue
        return([ipc_type, source, port, packet])

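#
# lisp_parse_packet() dispatches a received LISP control message to the
# appropriate handler based on the type field in the control header.
#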
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    status = False

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return(status)

    #
    # Convert the source string into a lisp_address unless it names an
    # internal IPC socket (those start with "lisp").
    #
    source_name = source
    if (source.find("lisp") == -1):
        source_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        source_address.string_to_afi(source)
        source_address.store_address(source)
        source = source_address

    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl)

    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl)

    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)

    elif (header.type == LISP_MAP_NOTIFY):
        if (source_name == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        else:
            if (lisp_is_running("lisp-rtr")):
                lisp_process_multicast_map_notify(packet, source)

            lisp_process_map_notify(lisp_sockets, packet, source)

    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)

    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        x, y, status = lisp_process_info_reply(source, packet, True)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        addr_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
            None)

    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)

    else:
        lprint("Invalid LISP control packet type {}".format(header.type))

    return(status)

def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl):

    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(probe))
    return

def lisp_process_smr(map_request):
    lprint("Received SMR-based Map-Request")
    return

def lisp_process_smr_invoked_request(map_request):
    lprint("Received SMR-invoked Map-Request")
    return

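#
# lisp_build_map_reply() builds a Map-Reply: a reply header, one EID-record,
# and one RLOC-record per entry in rloc_set.  RLOCs that are local to this
# system get the local bit set and carry the security keys, and an RTR
# advertising a priority-254 RLOC names itself "RTR" in the RLOC-name field.
#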
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
    keys, enc, auth, mr_ttl=-1):

    map_reply = lisp_map_reply()
    map_reply.rloc_probe = rloc_probe
    map_reply.echo_nonce_capable = enc
    map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    map_reply.record_count = 1
    map_reply.nonce = nonce
    packet = map_reply.encode()
    map_reply.print_map_reply()

    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group

    packet += eid_record.encode()
    eid_record.print_record(" ", False)

    local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    for rloc_entry in rloc_set:
        rloc_record = lisp_rloc_record()
        addr_str = rloc_entry.rloc.print_address_no_iid()
        if (addr_str in local_rlocs):
            rloc_record.local_bit = True
            rloc_record.probe_bit = rloc_probe
            rloc_record.keys = keys
            if (rloc_entry.priority == 254 and lisp_i_am_rtr):
                rloc_record.rloc_name = "RTR"

        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.reach_bit = True
        rloc_record.print_record(" ")
        packet += rloc_record.encode()

    return(packet)

def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    eid_record = lisp_eid_record()

    ref_count = 0
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        ref_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1

    eid_record.rloc_count = ref_count
    eid_record.authoritative = True

    #
    # When the caller did not choose an action, derive it from the type of
    # the first delegation entry.
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NULL):
        if (ref_count == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            ddt_node = ddt_entry.delegation_set[0]
            if (ddt_node.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL
            if (ddt_node.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL

    #
    # Set the incomplete bit for not-authoritative answers, and for MS
    # referrals/acks when we are a Map-Server but not a peer of the child.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record(" ", True)

    if (ref_count == 0): return(packet)

    for ddt_node in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ddt_node.delegate_address
        rloc_record.priority = ddt_node.priority
        rloc_record.weight = ddt_node.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record(" ")

    return(packet)

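#
# lisp_etr_process_map_request() answers a Map-Request received by an ETR:
# it looks up the requested EID in the local database-mappings and returns a
# Map-Reply, encapsulating RLOC-probe replies toward an RTR when NAT
# traversal is in use.
#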
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):

    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)

    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}".format(
            green(eid_str, False)))
        return

    db_eid_str = db.print_eid_tuple()

    lprint("Found database-mapping EID-prefix {} for requested EID {}".format(
        green(db_eid_str, False), green(eid_str, False)))

    #
    # Reply to the first ITR-RLOC; when that address is private and NAT
    # traversal is configured, reply to the outer source instead.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source

    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)

    #
    # RLOC-probe replies destined to an RTR (or probes that arrived with
    # source port 0) are encapsulated so they traverse the NAT.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        rtr = itr_rloc.print_address_no_iid()
        if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return

    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return

def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):

    #
    # Reply with this RTR's own RLOCs at priority 254.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address()): itr_rloc = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    rloc_set = []
    for addr in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if (addr == None): continue
        rloc = lisp_rloc()
        rloc.rloc.copy_address(addr)
        rloc.priority = 254
        rloc_set.append(rloc)

    enc = lisp_nonce_echoing
    keys = map_request.keys

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
        1440, True, keys, enc, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return

def lisp_get_private_rloc_set(target_site_eid, seid, group):
    rloc_set = target_site_eid.registered_rlocs

    site_eid = lisp_site_eid_lookup(seid, group, False)
    if (site_eid == None): return(rloc_set)

    #
    # Find the first public, non-RTR RLOC the target site registered and
    # collect copies of its private RLOCs.
    #
    target_public_rloc = None
    private_rlocs = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            private_rlocs.append(copy.deepcopy(rloc_entry))
            continue
        target_public_rloc = rloc_entry
        break

    if (target_public_rloc == None): return(rloc_set)
    target_public_rloc = target_public_rloc.rloc.print_address_no_iid()

    #
    # Find the first public, non-RTR RLOC the requesting site registered.
    #
    source_public_rloc = None
    for rloc_entry in site_eid.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_public_rloc = rloc_entry
        break

    if (source_public_rloc == None): return(rloc_set)
    source_public_rloc = source_public_rloc.rloc.print_address_no_iid()

    #
    # If both sites register the same public RLOC (they sit behind the same
    # NAT), or they share the same configured site-id, return the private
    # RLOCs.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_public_rloc == target_public_rloc):
            lprint("Return private RLOCs for sites behind {}".format(
                target_public_rloc))
            return(private_rlocs)
        return(rloc_set)

    if (site_id == site_eid.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(private_rlocs)

    return(rloc_set)

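#
# lisp_get_partial_rloc_set() implements the NAT-traversal RLOC filtering on
# the Map-Server side: when a registration contains RTR entries (priority
# 254), an RTR requestor gets the ETR's own RLOCs while an ITR requestor
# gets the private RLOCs plus the RTR list.
#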
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    rtr_list = []
    rloc_set = []

    #
    # Determine whether the Map-Request source is itself one of the
    # registered RTRs (priority-254 entries).
    #
    rtr_is_requestor = False
    rtrs_registered = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        rtrs_registered |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        rtr_is_requestor = True
        break

    #
    # No RTR entries means the site is not using NAT traversal; return the
    # registered set unchanged.
    #
    if (rtrs_registered == False): return(registered_rloc_set)

    #
    # When the RTR itself sits behind a NAT (LISP_RTR_BEHIND_NAT set in the
    # environment), private RLOCs are not considered.
    #
    rtr_behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Split the registered set into RTR entries and ETR entries.
    #
    for rloc_entry in registered_rloc_set:
        if (rtr_behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)

    #
    # An RTR requestor gets the ETR entries so it can encapsulate directly
    # to the ETR.
    #
    if (rtr_is_requestor): return(rloc_set)

    #
    # An ITR requestor gets the private RLOCs (useful when both xTRs are
    # behind the same NAT) plus the RTR entries.
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)

    rloc_set += rtr_list
    return(rloc_set)

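#
# Publish/subscribe support: lisp_store_pubsub_state() remembers an ITR's
# subscription request and lisp_process_pubsub() acknowledges it with a
# Map-Notify; lisp_notify_subscribers() later notifies each subscriber when
# the registered mapping changes.
#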
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
    pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    pubsub.add(reply_eid)
    return

def lisp_convert_reply_to_notify ( packet ) :
OOo00oOOo0OOO = struct . unpack ( "I" , packet [ 0 : 4 ] ) [ 0 ]
OOo00oOOo0OOO = socket . ntohl ( OOo00oOOo0OOO ) & 0xff
i11III1I = packet [ 4 : 12 ]
packet = packet [ 12 : : ]
O0oooOO = ( LISP_MAP_NOTIFY << 28 ) | OOo00oOOo0OOO
oooooOOo0Oo = struct . pack ( "I" , socket . htonl ( O0oooOO ) )
II11IiI1 = struct . pack ( "I" , 0 )
packet = oooooOOo0Oo + i11III1I + II11IiI1 + packet
return ( packet )
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
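    # Send a Map-Notify to every subscriber that holds pubsub state for this
    # EID-prefix. lisp_pubsub_cache appears to be keyed by the printed EID-prefix,
    # each value mapping to lisp_pubsub entries, roughly (illustrative shape only):
    #   lisp_pubsub_cache["10.0.0.0/8"][xtr_id] -> lisp_pubsub(itr, port, nonce, ttl, xtr_id)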
oO00oo000O = eid . print_prefix ( )
if ( lisp_pubsub_cache . has_key ( oO00oo000O ) == False ) : return
for I1i11 in lisp_pubsub_cache [ oO00oo000O ] . values ( ) :
OooOoOOo0 = I1i11 . itr
Iiiii = I1i11 . port
I1iIIiiiiI = red ( OooOoOOo0 . print_address_no_iid ( ) , False )
O0O00 = bold ( "subscriber" , False )
Oo0O0 = "0x" + lisp_hex_string ( I1i11 . xtr_id )
i11III1I = "0x" + lisp_hex_string ( I1i11 . nonce )
lprint ( " Notify {} {}:{} xtr-id {} for {}, nonce {}" . format ( O0O00 , I1iIIiiiiI , Iiiii , Oo0O0 , green ( oO00oo000O , False ) , i11III1I ) )
lisp_build_map_notify ( lisp_sockets , eid_record , [ oO00oo000O ] , 1 , OooOoOOo0 ,
Iiiii , I1i11 . nonce , 0 , 0 , 0 , site , False )
I1i11 . map_notify_count += 1
return
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce, ttl, xtr_id):
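    # Handle a subscription request: store the pubsub state, then convert the
    # Map-Reply that was built for this Map-Request into a Map-Notify and send it
    # back to the ITR as the subscription acknowledgment.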
lisp_store_pubsub_state ( reply_eid , itr_rloc , port , nonce , ttl , xtr_id )
Oo00o = green ( reply_eid . print_prefix ( ) , False )
OooOoOOo0 = red ( itr_rloc . print_address_no_iid ( ) , False )
oO0OOO0o0oooO = bold ( "Map-Notify" , False )
xtr_id = "0x" + lisp_hex_string ( xtr_id )
lprint ( "{} pubsub request for {} to ack ITR {} xtr-id: {}" . format ( oO0OOO0o0oooO ,
Oo00o , OooOoOOo0 , xtr_id ) )
packet = lisp_convert_reply_to_notify ( packet )
lisp_send_map_notify ( lisp_sockets , packet , itr_rloc , port )
return
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source, mr_sport, ecm_source):
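    # Map-Server handling of a Map-Request. Verifies an optional map-request
    # signature (required for crypto-hash EIDs or when the site demands it), looks
    # the target EID up in the site cache, and then either returns a negative
    # Map-Reply, proxy-replies on behalf of the site (honoring policy, NAT-forced
    # RLOC-sets and TTL overrides), or forwards the request in an ECM to one of the
    # site's registered ETRs. Returns (eid, group, ddt-action).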
Oo00o = map_request . target_eid
i1i11Ii1 = map_request . target_group
oO00oo000O = lisp_print_eid_tuple ( Oo00o , i1i11Ii1 )
o0oi1iIiii1I1ii = map_request . itr_rlocs [ 0 ]
Oo0O0 = map_request . xtr_id
i11III1I = map_request . nonce
O0oo0oo0 = LISP_NO_ACTION
I1i11 = map_request . subscribe_bit
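    # Signature check: if the target EID is an EID-crypto-hash EID, the Map-Request
    # must carry a signature that verifies against the signer's registered public key.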
O0OOOooo = True
o0o0Ooo0OO00o = ( lisp_get_eid_hash ( Oo00o ) != None )
if ( o0o0Ooo0OO00o ) :
o0o000OOO = map_request . map_request_signature
if ( o0o000OOO == None ) :
O0OOOooo = False
lprint ( ( "EID-crypto-hash signature verification {}, " + "no signature found" ) . format ( bold ( "failed" , False ) ) )
else :
oo0o0Oo = map_request . signature_eid
IIIIIiI , oooo0 , O0OOOooo = lisp_lookup_public_key ( oo0o0Oo )
if ( O0OOOooo ) :
O0OOOooo = map_request . verify_map_request_sig ( oooo0 )
else :
lprint ( "Public-key lookup failed for sig-eid {}, hash-eid {}" . format ( oo0o0Oo . print_address ( ) , IIIIIiI . print_address ( ) ) )
iii = bold ( "passed" , False ) if O0OOOooo else bold ( "failed" , False )
lprint ( "EID-crypto-hash signature verification {}" . format ( iii ) )
if ( I1i11 and O0OOOooo == False ) :
I1i11 = False
lprint ( "Suppress creating pubsub state due to signature failure" )
I1i1iiII1iI1i = o0oi1iIiii1I1ii if ( o0oi1iIiii1I1ii . afi == ecm_source . afi ) else ecm_source
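    # The EID is now looked up in the site cache; an unknown EID (or a bare (*,G)
    # entry) gets a negative Map-Reply with a 15-minute TTL and a site-not-found
    # referral action.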
ooO00oO0O = lisp_site_eid_lookup ( Oo00o , i1i11Ii1 , False )
if ( ooO00oO0O == None or ooO00oO0O . is_star_g ( ) ) :
iIo0OO0O000 = bold ( "Site not found" , False )
lprint ( "{} for requested EID {}" . format ( iIo0OO0O000 ,
green ( oO00oo000O , False ) ) )
lisp_send_negative_map_reply ( lisp_sockets , Oo00o , i1i11Ii1 , i11III1I , o0oi1iIiii1I1ii ,
mr_sport , 15 , Oo0O0 , I1i11 )
return ( [ Oo00o , i1i11Ii1 , LISP_DDT_ACTION_SITE_NOT_FOUND ] )
I11Ii11ii = ooO00oO0O . print_eid_tuple ( )
O0OOoOO000 = ooO00oO0O . site . site_name
if ( o0o0Ooo0OO00o == False and ooO00oO0O . require_signature ) :
o0o000OOO = map_request . map_request_signature
oo0o0Oo = map_request . signature_eid
if ( o0o000OOO == None or oo0o0Oo . is_null ( ) ) :
lprint ( "Signature required for site {}" . format ( O0OOoOO000 ) )
O0OOOooo = False
else :
oo0o0Oo = map_request . signature_eid
IIIIIiI , oooo0 , O0OOOooo = lisp_lookup_public_key ( oo0o0Oo )
if ( O0OOOooo ) :
O0OOOooo = map_request . verify_map_request_sig ( oooo0 )
else :
lprint ( "Public-key lookup failed for sig-eid {}, hash-eid {}" . format ( oo0o0Oo . print_address ( ) , IIIIIiI . print_address ( ) ) )
iii = bold ( "passed" , False ) if O0OOOooo else bold ( "failed" , False )
lprint ( "Required signature verification {}" . format ( iii ) )
if ( O0OOOooo and ooO00oO0O . registered == False ) :
lprint ( "Site '{}' with EID-prefix {} is not registered for EID {}" . format ( O0OOoOO000 , green ( I11Ii11ii , False ) , green ( oO00oo000O , False ) ) )
if ( ooO00oO0O . accept_more_specifics == False ) :
Oo00o = ooO00oO0O . eid
i1i11Ii1 = ooO00oO0O . group
Ii1 = 1
if ( ooO00oO0O . force_ttl != None ) :
Ii1 = ooO00oO0O . force_ttl | 0x80000000
lisp_send_negative_map_reply ( lisp_sockets , Oo00o , i1i11Ii1 , i11III1I , o0oi1iIiii1I1ii ,
mr_sport , Ii1 , Oo0O0 , I1i11 )
return ( [ Oo00o , i1i11Ii1 , LISP_DDT_ACTION_MS_NOT_REG ] )
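    # Decide whether to proxy-reply for the site (nat-forced, forced, requested,
    # drop-to-pitr, or a configured proxy-reply action) and evaluate any policy
    # attached to the site entry.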
I1II1i1Ii1 = False
OoOooo = ""
I1IIii1Ii111i = False
if ( ooO00oO0O . force_nat_proxy_reply ) :
OoOooo = ", nat-forced"
I1II1i1Ii1 = True
I1IIii1Ii111i = True
elif ( ooO00oO0O . force_proxy_reply ) :
OoOooo = ", forced"
I1IIii1Ii111i = True
elif ( ooO00oO0O . proxy_reply_requested ) :
OoOooo = ", requested"
I1IIii1Ii111i = True
elif ( map_request . pitr_bit and ooO00oO0O . pitr_proxy_reply_drop ) :
OoOooo = ", drop-to-pitr"
O0oo0oo0 = LISP_DROP_ACTION
elif ( ooO00oO0O . proxy_reply_action != "" ) :
O0oo0oo0 = ooO00oO0O . proxy_reply_action
OoOooo = ", forced, action {}" . format ( O0oo0oo0 )
O0oo0oo0 = LISP_DROP_ACTION if ( O0oo0oo0 == "drop" ) else LISP_NATIVE_FORWARD_ACTION
i11i = False
i1Ii1I1IIII = None
if ( I1IIii1Ii111i and lisp_policies . has_key ( ooO00oO0O . policy ) ) :
i111 = lisp_policies [ ooO00oO0O . policy ]
if ( i111 . match_policy_map_request ( map_request , mr_source ) ) : i1Ii1I1IIII = i111
if ( i1Ii1I1IIII ) :
o0oO0oo = bold ( "matched" , False )
lprint ( "Map-Request {} policy '{}', set-action '{}'" . format ( o0oO0oo ,
i111 . policy_name , i111 . set_action ) )
else :
o0oO0oo = bold ( "no match" , False )
lprint ( "Map-Request {} for policy '{}', implied drop" . format ( o0oO0oo ,
i111 . policy_name ) )
i11i = True
if ( OoOooo != "" ) :
lprint ( "Proxy-replying for EID {}, found site '{}' EID-prefix {}{}" . format ( green ( oO00oo000O , False ) , O0OOoOO000 , green ( I11Ii11ii , False ) ,
OoOooo ) )
iiiI11II1IiIi = ooO00oO0O . registered_rlocs
Ii1 = 1440
if ( I1II1i1Ii1 ) :
if ( ooO00oO0O . site_id != 0 ) :
oOoO = map_request . source_eid
iiiI11II1IiIi = lisp_get_private_rloc_set ( ooO00oO0O , oOoO , i1i11Ii1 )
if ( iiiI11II1IiIi == ooO00oO0O . registered_rlocs ) :
i1ii1I11iIII = ( ooO00oO0O . group . is_null ( ) == False )
Oo = lisp_get_partial_rloc_set ( iiiI11II1IiIi , I1i1iiII1iI1i , i1ii1I11iIII )
if ( Oo != iiiI11II1IiIi ) :
Ii1 = 15
iiiI11II1IiIi = Oo
if ( ooO00oO0O . force_ttl != None ) :
Ii1 = ooO00oO0O . force_ttl | 0x80000000
if ( i1Ii1I1IIII ) :
if ( i1Ii1I1IIII . set_record_ttl ) :
Ii1 = i1Ii1I1IIII . set_record_ttl
lprint ( "Policy set-record-ttl to {}" . format ( Ii1 ) )
if 18 - 18: iIii1I11I1II1 - O0 . o0oOOo0O0Ooo % oO0o
if ( i1Ii1I1IIII . set_action == "drop" ) :
lprint ( "Policy set-action drop, send negative Map-Reply" )
O0oo0oo0 = LISP_POLICY_DENIED_ACTION
iiiI11II1IiIi = [ ]
else :
Oo0o0o0oo = i1Ii1I1IIII . set_policy_map_reply ( )
if ( Oo0o0o0oo ) : iiiI11II1IiIi = [ Oo0o0o0oo ]
if ( i11i ) :
lprint ( "Implied drop action, send negative Map-Reply" )
O0oo0oo0 = LISP_POLICY_DENIED_ACTION
iiiI11II1IiIi = [ ]
O0o00oo = ooO00oO0O . echo_nonce_capable
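    # Build the proxy Map-Reply. On signature failure an auth-failure action with an
    # empty RLOC-set is returned; otherwise the (possibly policy- or NAT-adjusted)
    # RLOC-set is used. Pubsub requests are acknowledged with a Map-Notify instead
    # of a Map-Reply.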
if ( O0OOOooo ) :
IiIiIiiII1I = ooO00oO0O . eid
I11ii1I11ii = ooO00oO0O . group
else :
IiIiIiiII1I = Oo00o
I11ii1I11ii = i1i11Ii1
O0oo0oo0 = LISP_AUTH_FAILURE_ACTION
iiiI11II1IiIi = [ ]
packet = lisp_build_map_reply ( IiIiIiiII1I , I11ii1I11ii , iiiI11II1IiIi ,
i11III1I , O0oo0oo0 , Ii1 , False , None , O0o00oo , False )
if ( I1i11 ) :
lisp_process_pubsub ( lisp_sockets , packet , IiIiIiiII1I , o0oi1iIiii1I1ii ,
mr_sport , i11III1I , Ii1 , Oo0O0 )
else :
lisp_send_map_reply ( lisp_sockets , packet , o0oi1iIiii1I1ii , mr_sport )
return ( [ ooO00oO0O . eid , ooO00oO0O . group , LISP_DDT_ACTION_MS_ACK ] )
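    # No proxy-reply: pick one of the site's registered ETRs by hashing the
    # source/target EID and forward the original Map-Request to it inside an ECM.
    # With no registered RLOCs the request is only logged.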
iI1111i = len ( ooO00oO0O . registered_rlocs )
if ( iI1111i == 0 ) :
lprint ( "Requested EID {} found site '{}' with EID-prefix {} with " + "no registered RLOCs" . format ( green ( oO00oo000O , False ) , O0OOoOO000 ,
green ( I11Ii11ii , False ) ) )
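    # FIXME: in the lprint() call above, .format() binds only to the second string
    # literal ("no registered RLOCs"), so the {} placeholders are never substituted;
    # wrapping the concatenated string in parentheses before .format(), as other log
    # calls in this file do, would produce the intended message.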
return ( [ ooO00oO0O . eid , ooO00oO0O . group , LISP_DDT_ACTION_MS_ACK ] )
I1Ii1i11II = map_request . target_eid if map_request . source_eid . is_null ( ) else map_request . source_eid
ooo000 = map_request . target_eid . hash_address ( I1Ii1i11II )
ooo000 %= iI1111i
Ii11 = ooO00oO0O . registered_rlocs [ ooo000 ]
if ( Ii11 . rloc . is_null ( ) ) :
lprint ( ( "Suppress forwarding Map-Request for EID {} at site '{}' " + "EID-prefix {}, no RLOC address" ) . format ( green ( oO00oo000O , False ) ,
O0OOoOO000 , green ( I11Ii11ii , False ) ) )
else :
lprint ( ( "Forwarding Map-Request for EID {} to ETR {} at site '{}' " + "EID-prefix {}" ) . format ( green ( oO00oo000O , False ) ,
red ( Ii11 . rloc . print_address ( ) , False ) , O0OOoOO000 ,
green ( I11Ii11ii , False ) ) )
lisp_send_ecm ( lisp_sockets , packet , map_request . source_eid , mr_sport ,
map_request . target_eid , Ii11 . rloc , to_etr = True )
return ( [ ooO00oO0O . eid , ooO00oO0O . group , LISP_DDT_ACTION_MS_ACK ] )
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
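    # DDT-node handling of a Map-Request: consult the site cache (when this process
    # is also a Map-Server) or the DDT delegation cache, compute a covering negative
    # prefix for the not-found / not-registered cases, and answer with a Map-Referral.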
Oo00o = map_request . target_eid
i1i11Ii1 = map_request . target_group
oO00oo000O = lisp_print_eid_tuple ( Oo00o , i1i11Ii1 )
i11III1I = map_request . nonce
O0oo0oo0 = LISP_DDT_ACTION_NULL
I1II111i1 = None
if ( lisp_i_am_ms ) :
ooO00oO0O = lisp_site_eid_lookup ( Oo00o , i1i11Ii1 , False )
if ( ooO00oO0O == None ) : return
if ( ooO00oO0O . registered ) :
O0oo0oo0 = LISP_DDT_ACTION_MS_ACK
Ii1 = 1440
else :
Oo00o , i1i11Ii1 , O0oo0oo0 = lisp_ms_compute_neg_prefix ( Oo00o , i1i11Ii1 )
O0oo0oo0 = LISP_DDT_ACTION_MS_NOT_REG
Ii1 = 1
else :
I1II111i1 = lisp_ddt_cache_lookup ( Oo00o , i1i11Ii1 , False )
if ( I1II111i1 == None ) :
O0oo0oo0 = LISP_DDT_ACTION_NOT_AUTH
Ii1 = 0
lprint ( "DDT delegation entry not found for EID {}" . format ( green ( oO00oo000O , False ) ) )
elif ( I1II111i1 . is_auth_prefix ( ) ) :
O0oo0oo0 = LISP_DDT_ACTION_DELEGATION_HOLE
Ii1 = 15
oOOooo0o000O0 = I1II111i1 . print_eid_tuple ( )
lprint ( ( "DDT delegation entry not found but auth-prefix {} " + "found for EID {}" ) . format ( oOOooo0o000O0 ,
green ( oO00oo000O , False ) ) )
if ( i1i11Ii1 . is_null ( ) ) :
Oo00o = lisp_ddt_compute_neg_prefix ( Oo00o , I1II111i1 ,
lisp_ddt_cache )
else :
i1i11Ii1 = lisp_ddt_compute_neg_prefix ( i1i11Ii1 , I1II111i1 ,
lisp_ddt_cache )
Oo00o = lisp_ddt_compute_neg_prefix ( Oo00o , I1II111i1 ,
I1II111i1 . source_cache )
I1II111i1 = None
else :
oOOooo0o000O0 = I1II111i1 . print_eid_tuple ( )
lprint ( "DDT delegation entry {} found for EID {}" . format ( oOOooo0o000O0 , green ( oO00oo000O , False ) ) )
Ii1 = 1440
oOo = lisp_build_map_referral ( Oo00o , i1i11Ii1 , I1II111i1 , O0oo0oo0 , Ii1 , i11III1I )
i11III1I = map_request . nonce >> 32
if ( map_request . nonce != 0 and i11III1I != 0xdfdf0e1d ) : port = LISP_CTRL_PORT
lisp_send_map_referral ( lisp_sockets , oOo , ecm_source , port )
return
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
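    # Find the length of the longest prefix shared by 'eid' and 'entry_prefix'
    # (hash_address() appears to XOR the two addresses, so the first set bit marks
    # the first differing bit) and raise neg_prefix.mask_len to at least that length.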
oOoOoOooO0o0O0O = eid . hash_address ( entry_prefix )
I1IiIiIIIiIII = eid . addr_length ( ) * 8
Ooo = 0
for Ooo in range ( I1IiIiIIIiIII ) :
o00oOo0o0o00 = 1 << ( I1IiIiIIIiIII - Ooo - 1 )
if ( oOoOoOooO0o0O0O & o00oOo0o0o00 ) : break
if ( Ooo > neg_prefix . mask_len ) : neg_prefix . mask_len = Ooo
return
def lisp_neg_prefix_walk(entry, parms):
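    # Cache-walk callback used while computing negative prefixes: entries in a
    # different instance-id/AFI, or outside the auth-prefix, are skipped; matching
    # entries tighten the negative prefix via lisp_find_negative_mask_len().
    # Always returns [True, parms] so the walk continues.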
Oo00o , i1III11I11 , O0OOOo = parms
if ( i1III11I11 == None ) :
if ( entry . eid . instance_id != Oo00o . instance_id ) :
return ( [ True , parms ] )
if ( entry . eid . afi != Oo00o . afi ) : return ( [ True , parms ] )
else :
if ( entry . eid . is_more_specific ( i1III11I11 ) == False ) :
return ( [ True , parms ] )
lisp_find_negative_mask_len ( Oo00o , entry . eid , O0OOOo )
return ( [ True , parms ] )
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
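    # Compute the least-specific ("negative") prefix a DDT node can return in a
    # Map-Referral for a missed lookup, by walking the given cache under the entry's
    # auth-prefix and masking the EID to the longest match found.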
if ( eid . is_binary ( ) == False ) : return ( eid )
O0OOOo = lisp_address ( eid . afi , "" , 0 , 0 )
O0OOOo . copy_address ( eid )
O0OOOo . mask_len = 0
OoIiII = ddt_entry . print_eid_tuple ( )
i1III11I11 = ddt_entry . eid
eid , i1III11I11 , O0OOOo = cache . walk_cache ( lisp_neg_prefix_walk ,
( eid , i1III11I11 , O0OOOo ) )
O0OOOo . mask_address ( O0OOOo . mask_len )
lprint ( ( "Least specific prefix computed from ddt-cache for EID {} " + "using auth-prefix {} is {}" ) . format ( green ( eid . print_address ( ) , False ) ,
OoIiII , O0OOOo . print_prefix ( ) ) )
return ( O0OOOo )
def lisp_ms_compute_neg_prefix(eid, group):
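    # Map-Server variant of the negative-prefix computation: derive covering
    # prefixes for the EID (and the group for (S,G) requests) from the DDT cache and
    # the site cache, and return them together with the referral action
    # (DELEGATION_HOLE inside an auth-prefix, otherwise NOT_AUTH).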
O0OOOo = lisp_address ( eid . afi , "" , 0 , 0 )
O0OOOo . copy_address ( eid )
O0OOOo . mask_len = 0
i1IiI1 = lisp_address ( group . afi , "" , 0 , 0 )
i1IiI1 . copy_address ( group )
i1IiI1 . mask_len = 0
i1III11I11 = None
if ( group . is_null ( ) ) :
I1II111i1 = lisp_ddt_cache . lookup_cache ( eid , False )
if ( I1II111i1 == None ) :
O0OOOo . mask_len = O0OOOo . host_mask_len ( )
i1IiI1 . mask_len = i1IiI1 . host_mask_len ( )
return ( [ O0OOOo , i1IiI1 , LISP_DDT_ACTION_NOT_AUTH ] )
IIi11o0oO = lisp_sites_by_eid
if ( I1II111i1 . is_auth_prefix ( ) ) : i1III11I11 = I1II111i1 . eid
else :
I1II111i1 = lisp_ddt_cache . lookup_cache ( group , False )
if ( I1II111i1 == None ) :
O0OOOo . mask_len = O0OOOo . host_mask_len ( )
i1IiI1 . mask_len = i1IiI1 . host_mask_len ( )
return ( [ O0OOOo , i1IiI1 , LISP_DDT_ACTION_NOT_AUTH ] )
if ( I1II111i1 . is_auth_prefix ( ) ) : i1III11I11 = I1II111i1 . group
group , i1III11I11 , i1IiI1 = lisp_sites_by_eid . walk_cache ( lisp_neg_prefix_walk , ( group , i1III11I11 , i1IiI1 ) )
i1IiI1 . mask_address ( i1IiI1 . mask_len )
lprint ( ( "Least specific prefix computed from site-cache for " + "group EID {} using auth-prefix {} is {}" ) . format ( group . print_address ( ) , i1III11I11 . print_prefix ( ) if ( i1III11I11 != None ) else "'not found'" ,
i1IiI1 . print_prefix ( ) ) )
IIi11o0oO = I1II111i1 . source_cache
O0oo0oo0 = LISP_DDT_ACTION_DELEGATION_HOLE if ( i1III11I11 != None ) else LISP_DDT_ACTION_NOT_AUTH
eid , i1III11I11 , O0OOOo = IIi11o0oO . walk_cache ( lisp_neg_prefix_walk ,
( eid , i1III11I11 , O0OOOo ) )
O0OOOo . mask_address ( O0OOOo . mask_len )
lprint ( ( "Least specific prefix computed from site-cache for EID {} " + "using auth-prefix {} is {}" ) . format ( green ( eid . print_address ( ) , False ) ,
i1III11I11 . print_prefix ( ) if ( i1III11I11 != None ) else "'not found'" , O0OOOo . print_prefix ( ) ) )
return ( [ O0OOOo , i1IiI1 , O0oo0oo0 ] )
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port, action, eid_prefix, group_prefix):
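    # Build and send a Map-Referral in response to a DDT-encapsulated Map-Request:
    # one EID-record whose action and TTL depend on the lookup result, followed by
    # one RLOC-record per configured delegation.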
Oo00o = map_request . target_eid
i1i11Ii1 = map_request . target_group
i11III1I = map_request . nonce
if ( action == LISP_DDT_ACTION_MS_ACK ) : Ii1 = 1440
OOOoo = lisp_map_referral ( )
OOOoo . record_count = 1
OOOoo . nonce = i11III1I
oOo = OOOoo . encode ( )
OOOoo . print_map_referral ( )
o000ooo0o0O = False
if ( action == LISP_DDT_ACTION_SITE_NOT_FOUND ) :
eid_prefix , group_prefix , action = lisp_ms_compute_neg_prefix ( Oo00o ,
i1i11Ii1 )
Ii1 = 15
if ( action == LISP_DDT_ACTION_MS_NOT_REG ) : Ii1 = 1
if ( action == LISP_DDT_ACTION_MS_ACK ) : Ii1 = 1440
if ( action == LISP_DDT_ACTION_DELEGATION_HOLE ) : Ii1 = 15
if ( action == LISP_DDT_ACTION_NOT_AUTH ) : Ii1 = 0
I1I1iI1IIII = False
iI1111i = 0
I1II111i1 = lisp_ddt_cache_lookup ( Oo00o , i1i11Ii1 , False )
if ( I1II111i1 != None ) :
iI1111i = len ( I1II111i1 . delegation_set )
I1I1iI1IIII = I1II111i1 . is_ms_peer_entry ( )
I1II111i1 . map_referrals_sent += 1
if ( action == LISP_DDT_ACTION_NOT_AUTH ) : o000ooo0o0O = True
if ( action in ( LISP_DDT_ACTION_MS_REFERRAL , LISP_DDT_ACTION_MS_ACK ) ) :
o000ooo0o0O = ( I1I1iI1IIII == False )
IiII1iiI = lisp_eid_record ( )
IiII1iiI . rloc_count = iI1111i
IiII1iiI . authoritative = True
IiII1iiI . action = action
IiII1iiI . ddt_incomplete = o000ooo0o0O
IiII1iiI . eid = eid_prefix
IiII1iiI . group = group_prefix
IiII1iiI . record_ttl = Ii1
oOo += IiII1iiI . encode ( )
IiII1iiI . print_record ( " " , True )
if ( iI1111i != 0 ) :
for oOoI1I in I1II111i1 . delegation_set :
o00o = lisp_rloc_record ( )
o00o . rloc = oOoI1I . delegate_address
o00o . priority = oOoI1I . priority
o00o . weight = oOoI1I . weight
o00o . mpriority = 255
o00o . mweight = 0
o00o . reach_bit = True
oOo += o00o . encode ( )
o00o . print_record ( " " )
if ( map_request . nonce != 0 ) : port = LISP_CTRL_PORT
lisp_send_map_referral ( lisp_sockets , oOo , ecm_source , port )
return
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl, xtr_id, pubsub):
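    # Send a negative Map-Reply (no RLOC-records) for an EID we cannot resolve:
    # the action is native-forward for unicast, drop for multicast, or
    # send-map-request for crypto-hash EIDs; pubsub requests are acknowledged with a
    # Map-Notify via lisp_process_pubsub() instead.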
lprint ( "Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}" . format ( lisp_print_eid_tuple ( eid , group ) , lisp_hex_string ( nonce ) ,
red ( dest . print_address ( ) , False ) ) )
O0oo0oo0 = LISP_NATIVE_FORWARD_ACTION if group . is_null ( ) else LISP_DROP_ACTION
if ( lisp_get_eid_hash ( eid ) != None ) :
O0oo0oo0 = LISP_SEND_MAP_REQUEST_ACTION
oOo = lisp_build_map_reply ( eid , group , [ ] , nonce , O0oo0oo0 , ttl , False ,
None , False , False )
if ( pubsub ) :
lisp_process_pubsub ( sockets , oOo , eid , dest , port , nonce , ttl ,
xtr_id )
else :
lisp_send_map_reply ( sockets , oOo , dest , port )
return
def lisp_retransmit_ddt_map_request(mr):
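    # Retransmit timer for a queued DDT Map-Request: charge a no-response to the
    # referral node used last time, give up after the retry limit, otherwise resend
    # and restart the timer.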
ooooooOoo = mr . mr_source . print_address ( )
I1i1I = mr . print_eid_tuple ( )
i11III1I = mr . nonce
if ( mr . last_request_sent_to ) :
ooOII1ii1ii1I1 = mr . last_request_sent_to . print_address ( )
IiIIiIiI1II = lisp_referral_cache_lookup ( mr . last_cached_prefix [ 0 ] ,
mr . last_cached_prefix [ 1 ] , True )
if ( IiIIiIiI1II and IiIIiIiI1II . referral_set . has_key ( ooOII1ii1ii1I1 ) ) :
IiIIiIiI1II . referral_set [ ooOII1ii1ii1I1 ] . no_responses += 1
if ( mr . retry_count == LISP_MAX_MAP_NOTIFY_RETRIES ) :
lprint ( "DDT Map-Request retry limit reached for EID {}, nonce 0x{}" . format ( green ( I1i1I , False ) , lisp_hex_string ( i11III1I ) ) )
mr . dequeue_map_request ( )
return
mr . retry_count += 1
o00oOOO = green ( ooooooOoo , False )
i1 = green ( I1i1I , False )
lprint ( "Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( bold ( "Map-Request" , False ) , "P" if mr . from_pitr else "" ,
red ( mr . itr . print_address ( ) , False ) , o00oOOO , i1 ,
lisp_hex_string ( i11III1I ) ) )
lisp_send_ddt_map_request ( mr , False )
mr . retransmit_timer = threading . Timer ( LISP_DDT_MAP_REQUEST_INTERVAL ,
lisp_retransmit_ddt_map_request , [ mr ] )
mr . retransmit_timer . start ( )
return
def lisp_get_referral_node(referral, source_eid, dest_eid):
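    # Pick a referral node to query next: keep only nodes that are up and share the
    # best (numerically lowest) priority, then select one deterministically by
    # hashing the source and destination EIDs.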
Iii11I = [ ]
for IiOO00O00 in referral . referral_set . values ( ) :
if ( IiOO00O00 . updown == False ) : continue
if ( len ( Iii11I ) == 0 or Iii11I [ 0 ] . priority == IiOO00O00 . priority ) :
Iii11I . append ( IiOO00O00 )
elif ( Iii11I [ 0 ] . priority > IiOO00O00 . priority ) :
Iii11I = [ ]
Iii11I . append ( IiOO00O00 )
i1ii = len ( Iii11I )
if ( i1ii == 0 ) : return ( None )
ooo000 = dest_eid . hash_address ( source_eid )
ooo000 = ooo000 % i1ii
return ( Iii11I [ ooo000 ] )
def lisp_send_ddt_map_request(mr, send_to_root):
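    # Send (or resend) a queued Map-Request towards the DDT hierarchy: give up after
    # 8 attempts, optionally jump to the root, answer with a negative Map-Reply when
    # no referral-cache entry or reachable referral node exists, and otherwise
    # ECM-encapsulate the request to the chosen referral node.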
o000oOOooO00 = mr . lisp_sockets
i11III1I = mr . nonce
OooOoOOo0 = mr . itr
O0Oi1iIIiI1i = mr . mr_source
oO00oo000O = mr . print_eid_tuple ( )
if ( mr . send_count == 8 ) :
lprint ( "Giving up on map-request-queue entry {}, nonce 0x{}" . format ( green ( oO00oo000O , False ) , lisp_hex_string ( i11III1I ) ) )
mr . dequeue_map_request ( )
return
if ( send_to_root ) :
OOOo0o000oO0O = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
II1I = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
mr . tried_root = True
lprint ( "Jumping up to root for EID {}" . format ( green ( oO00oo000O , False ) ) )
else :
OOOo0o000oO0O = mr . eid
II1I = mr . group
I1IIiII1 = lisp_referral_cache_lookup ( OOOo0o000oO0O , II1I , False )
if ( I1IIiII1 == None ) :
lprint ( "No referral cache entry found" )
lisp_send_negative_map_reply ( o000oOOooO00 , OOOo0o000oO0O , II1I ,
i11III1I , OooOoOOo0 , mr . sport , 15 , None , False )
return
OoO0OOOooo = I1IIiII1 . print_eid_tuple ( )
lprint ( "Found referral cache entry {}, referral-type: {}" . format ( OoO0OOOooo ,
I1IIiII1 . print_referral_type ( ) ) )
IiOO00O00 = lisp_get_referral_node ( I1IIiII1 , O0Oi1iIIiI1i , mr . eid )
if ( IiOO00O00 == None ) :
lprint ( "No reachable referral-nodes found" )
mr . dequeue_map_request ( )
lisp_send_negative_map_reply ( o000oOOooO00 , I1IIiII1 . eid ,
I1IIiII1 . group , i11III1I , OooOoOOo0 , mr . sport , 1 , None , False )
return
lprint ( "Send DDT Map-Request to {} {} for EID {}, nonce 0x{}" . format ( IiOO00O00 . referral_address . print_address ( ) ,
I1IIiII1 . print_referral_type ( ) , green ( oO00oo000O , False ) ,
lisp_hex_string ( i11III1I ) ) )
oo0OoOiI1I1i1i = ( I1IIiII1 . referral_type == LISP_DDT_ACTION_MS_REFERRAL or
I1IIiII1 . referral_type == LISP_DDT_ACTION_MS_ACK )
lisp_send_ecm ( o000oOOooO00 , mr . packet , O0Oi1iIIiI1i , mr . sport , mr . eid ,
IiOO00O00 . referral_address , to_ms = oo0OoOiI1I1i1i , ddt = True )
mr . last_request_sent_to = IiOO00O00 . referral_address
mr . last_sent = lisp_get_timestamp ( )
mr . send_count += 1
IiOO00O00 . map_requests_sent += 1
return
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source, sport, mr_source):
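    # Map-Resolver handling of a Map-Request: queue it as a DDT map-request (keeping
    # the original packet, ITR and source port) and send the first DDT Map-Request
    # for it.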
Oo00o = map_request . target_eid
i1i11Ii1 = map_request . target_group
I1i1I = map_request . print_eid_tuple ( )
ooooooOoo = mr_source . print_address ( )
i11III1I = map_request . nonce
o00oOOO = green ( ooooooOoo , False )
i1 = green ( I1i1I , False )
lprint ( "Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( "P" if map_request . pitr_bit else "" ,
red ( ecm_source . print_address ( ) , False ) , o00oOOO , i1 ,
lisp_hex_string ( i11III1I ) ) )
Ii1IIi1III1i = lisp_ddt_map_request ( lisp_sockets , packet , Oo00o , i1i11Ii1 , i11III1I )
Ii1IIi1III1i . packet = packet
Ii1IIi1III1i . itr = ecm_source
Ii1IIi1III1i . mr_source = mr_source
Ii1IIi1III1i . sport = sport
Ii1IIi1III1i . from_pitr = map_request . pitr_bit
Ii1IIi1III1i . queue_map_request ( )
lisp_send_ddt_map_request ( Ii1IIi1III1i , False )
return
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port, mr_source, mr_port, ddt_request, ttl):
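    # Top-level Map-Request dispatcher: decode the packet, short-circuit RLOC-probe
    # and SMR handling, then hand the request to the ETR, Map-Server (plus
    # Map-Referral generation for DDT-encapsulated requests), Map-Resolver, or
    # DDT-node logic depending on which roles this process has.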
oOO = packet
IIi11i1I = lisp_map_request ( )
packet = IIi11i1I . decode ( packet , mr_source , mr_port )
if ( packet == None ) :
lprint ( "Could not decode Map-Request packet" )
return
IIi11i1I . print_map_request ( )
if ( IIi11i1I . rloc_probe ) :
lisp_process_rloc_probe_request ( lisp_sockets , IIi11i1I ,
mr_source , mr_port , ttl )
return
if ( IIi11i1I . smr_bit ) :
lisp_process_smr ( IIi11i1I )
if ( IIi11i1I . smr_invoked_bit ) :
lisp_process_smr_invoked_request ( IIi11i1I )
if ( lisp_i_am_etr ) :
lisp_etr_process_map_request ( lisp_sockets , IIi11i1I , mr_source ,
mr_port , ttl )
if ( lisp_i_am_ms ) :
packet = oOO
Oo00o , i1i11Ii1 , iIIi1 = lisp_ms_process_map_request ( lisp_sockets ,
oOO , IIi11i1I , mr_source , mr_port , ecm_source )
if ( ddt_request ) :
lisp_ms_send_map_referral ( lisp_sockets , IIi11i1I , ecm_source ,
ecm_port , iIIi1 , Oo00o , i1i11Ii1 )
return
if ( lisp_i_am_mr and not ddt_request ) :
lisp_mr_process_map_request ( lisp_sockets , oOO , IIi11i1I ,
ecm_source , mr_port , mr_source )
if ( lisp_i_am_ddt or ddt_request ) :
packet = oOO
lisp_ddt_process_map_request ( lisp_sockets , IIi11i1I , ecm_source ,
ecm_port )
return
def lisp_store_mr_stats(source, nonce):
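    # Update per-Map-Resolver statistics when a negative Map-Reply arrives: count it,
    # timestamp it, and fold the RTT in when the nonce matches the last request sent
    # to that Map-Resolver.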
Ii1IIi1III1i = lisp_get_map_resolver ( source , None )
if ( Ii1IIi1III1i == None ) : return
Ii1IIi1III1i . neg_map_replies_received += 1
Ii1IIi1III1i . last_reply = lisp_get_timestamp ( )
if ( ( Ii1IIi1III1i . neg_map_replies_received % 100 ) == 0 ) : Ii1IIi1III1i . total_rtt = 0
if ( Ii1IIi1III1i . last_nonce == nonce ) :
Ii1IIi1III1i . total_rtt += ( time . time ( ) - Ii1IIi1III1i . last_used )
Ii1IIi1III1i . last_nonce = 0
if ( ( Ii1IIi1III1i . neg_map_replies_received % 10 ) == 0 ) : Ii1IIi1III1i . last_nonce = 0
return
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
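    # Process a received Map-Reply: decode each EID-record and its RLOC-records,
    # track RLOC-probe replies, optionally prune RLOC-sets for NAT traversal, and
    # add or refresh the corresponding map-cache entries.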
global lisp_map_cache
Iiii = lisp_map_reply ( )
packet = Iiii . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Reply packet" )
return
Iiii . print_map_reply ( )
oOo00OoOoo = None
for II11iIII1i1I in range ( Iiii . record_count ) :
IiII1iiI = lisp_eid_record ( )
packet = IiII1iiI . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode EID-record in Map-Reply packet" )
return
IiII1iiI . print_record ( " " , False )
if ( IiII1iiI . rloc_count == 0 ) :
lisp_store_mr_stats ( source , Iiii . nonce )
O0OOo0OO0oOo = ( IiII1iiI . group . is_null ( ) == False )
if ( lisp_decent_push_configured ) :
O0oo0oo0 = IiII1iiI . action
if ( O0OOo0OO0oOo and O0oo0oo0 == LISP_DROP_ACTION ) :
if ( IiII1iiI . eid . is_local ( ) ) : continue
if ( IiII1iiI . eid . is_null ( ) ) : continue
if ( O0OOo0OO0oOo ) :
ooooOoo000O = lisp_map_cache_lookup ( IiII1iiI . eid , IiII1iiI . group )
else :
ooooOoo000O = lisp_map_cache . lookup_cache ( IiII1iiI . eid , True )
IIiIIiiIIi1 = ( ooooOoo000O == None )
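    # Decode each RLOC-record; reuse the existing RLOC object from the map-cache
    # entry when there is one (so probe/keying state is kept), handle gleaned
    # entries and RLOC-probe replies, and collect the resulting RLOC-set.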
iiiI11II1IiIi = [ ]
for o0000o0O0ooo in range ( IiII1iiI . rloc_count ) :
o00o = lisp_rloc_record ( )
o00o . keys = Iiii . keys
packet = o00o . decode ( packet , Iiii . nonce )
if ( packet == None ) :
lprint ( "Could not decode RLOC-record in Map-Reply packet" )
return
o00o . print_record ( " " )
OoOO0 = None
if ( ooooOoo000O ) : OoOO0 = ooooOoo000O . get_rloc ( o00o . rloc )
if ( OoOO0 ) :
Oo0o0o0oo = OoOO0
else :
Oo0o0o0oo = lisp_rloc ( )
Iiiii = Oo0o0o0oo . store_rloc_from_record ( o00o , Iiii . nonce ,
source )
Oo0o0o0oo . echo_nonce_capable = Iiii . echo_nonce_capable
if ( Oo0o0o0oo . echo_nonce_capable ) :
ooOOo0o = Oo0o0o0oo . rloc . print_address_no_iid ( )
if ( lisp_get_echo_nonce ( None , ooOOo0o ) == None ) :
lisp_echo_nonce ( ooOOo0o )
if ( ooooOoo000O and ooooOoo000O . gleaned ) :
Oo0o0o0oo = ooooOoo000O . rloc_set [ 0 ]
Iiiii = Oo0o0o0oo . translated_port
if ( Iiii . rloc_probe and o00o . probe_bit ) :
if ( Oo0o0o0oo . rloc . afi == source . afi ) :
lisp_process_rloc_probe_reply ( Oo0o0o0oo . rloc , source , Iiiii ,
Iiii . nonce , Iiii . hop_count , ttl )
iiiI11II1IiIi . append ( Oo0o0o0oo )
if ( lisp_data_plane_security and Oo0o0o0oo . rloc_recent_rekey ( ) ) :
oOo00OoOoo = Oo0o0o0oo
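    # NAT traversal: for non-probe replies keep only the usable RLOCs (private RLOCs
    # are kept but marked priority 1 / unreachable-state; an xTR keeps the RTR
    # RLOCs, an RTR keeps the others); JSON-only RLOC-records are then pruned before
    # caching.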
if ( Iiii . rloc_probe == False and lisp_nat_traversal ) :
Oo = [ ]
I1IIiI = [ ]
for Oo0o0o0oo in iiiI11II1IiIi :
if ( Oo0o0o0oo . rloc . is_private_address ( ) ) :
Oo0o0o0oo . priority = 1
Oo0o0o0oo . state = LISP_RLOC_UNREACH_STATE
Oo . append ( Oo0o0o0oo )
I1IIiI . append ( Oo0o0o0oo . rloc . print_address_no_iid ( ) )
continue
if 31 - 31: OOooOOo + OoOoOO00 * OOooOOo + OoOoOO00 / o0oOOo0O0Ooo . iIii1I11I1II1
if 1 - 1: I1Ii111 * i11iIiiIii % I1Ii111 - OoO0O00 + I1Ii111 / Oo0Ooo
if 3 - 3: OOooOOo - i11iIiiIii / I1Ii111 . OOooOOo - OoO0O00
if 60 - 60: OoOoOO00 / i1IIi . Ii1I - OoO0O00 - OoooooooOO
if 39 - 39: I1IiiI + i1IIi * OoO0O00 % I11i
if 41 - 41: I1ii11iIi11i * IiII
if ( Oo0o0o0oo . priority == 254 and lisp_i_am_rtr == False ) :
Oo . append ( Oo0o0o0oo )
I1IIiI . append ( Oo0o0o0oo . rloc . print_address_no_iid ( ) )
if 16 - 16: I1Ii111 % iIii1I11I1II1 / I1IiiI * OoOoOO00 / IiII / OoOoOO00
if ( Oo0o0o0oo . priority != 254 and lisp_i_am_rtr ) :
Oo . append ( Oo0o0o0oo )
I1IIiI . append ( Oo0o0o0oo . rloc . print_address_no_iid ( ) )
if 29 - 29: OoooooooOO / oO0o
if 1 - 1: OoOoOO00 . i11iIiiIii % I1Ii111 + OoooooooOO - Oo0Ooo . I1ii11iIi11i
if 46 - 46: i11iIiiIii + I11i - iIii1I11I1II1 / OoO0O00 - ooOoO0o / i1IIi
if ( I1IIiI != [ ] ) :
iiiI11II1IiIi = Oo
lprint ( "NAT-traversal optimized RLOC-set: {}" . format ( I1IIiI ) )
if 44 - 44: o0oOOo0O0Ooo + Oo0Ooo
if 46 - 46: OOooOOo % I1IiiI
if 66 - 66: iIii1I11I1II1 . o0oOOo0O0Ooo - ooOoO0o
if 27 - 27: Oo0Ooo - i1IIi * OoooooooOO - OoOoOO00 + OoOoOO00
if 24 - 24: i1IIi . OoOoOO00 / I1Ii111 + O0
if 86 - 86: Ii1I * OoOoOO00 % I1ii11iIi11i + OOooOOo
if 85 - 85: iII111i % i11iIiiIii
Oo = [ ]
for Oo0o0o0oo in iiiI11II1IiIi :
if ( Oo0o0o0oo . json != None ) : continue
Oo . append ( Oo0o0o0oo )
if 78 - 78: i11iIiiIii / I11i / Oo0Ooo + II111iiii - I1ii11iIi11i / I1ii11iIi11i
if ( Oo != [ ] ) :
i1Ii11II = len ( iiiI11II1IiIi ) - len ( Oo )
lprint ( "Pruning {} no-address RLOC-records for map-cache" . format ( i1Ii11II ) )
if 28 - 28: iIii1I11I1II1 / IiII - iIii1I11I1II1 . i1IIi - O0 * ooOoO0o
iiiI11II1IiIi = Oo
if ( Iiii . rloc_probe and ooooOoo000O != None ) : iiiI11II1IiIi = ooooOoo000O . rloc_set
if 13 - 13: I1ii11iIi11i / ooOoO0o * I1Ii111
if 45 - 45: I1ii11iIi11i - I11i
if 60 - 60: OOooOOo - OOooOOo * OoOoOO00 / Ii1I % iII111i % Oo0Ooo
if 75 - 75: iIii1I11I1II1 - IiII - I1Ii111
if 4 - 4: i11iIiiIii % OoooooooOO . i11iIiiIii
ooiIi1 = IIiIIiiIIi1
if ( ooooOoo000O and iiiI11II1IiIi != ooooOoo000O . rloc_set ) :
ooooOoo000O . delete_rlocs_from_rloc_probe_list ( )
ooiIi1 = True
if 49 - 49: i1IIi * iII111i - iIii1I11I1II1 % I11i * O0 / OoOoOO00
if 48 - 48: IiII
if 69 - 69: o0oOOo0O0Ooo % i11iIiiIii - OOooOOo - o0oOOo0O0Ooo
if 98 - 98: o0oOOo0O0Ooo * OoO0O00 . OoooooooOO
if 40 - 40: I1Ii111 + Oo0Ooo + I1Ii111
o00ooO0OOOooo0 = ooooOoo000O . uptime if ( ooooOoo000O ) else None
if ( ooooOoo000O == None or ooooOoo000O . gleaned == False ) :
ooooOoo000O = lisp_mapping ( IiII1iiI . eid , IiII1iiI . group , iiiI11II1IiIi )
ooooOoo000O . mapping_source = source
ooooOoo000O . map_cache_ttl = IiII1iiI . store_ttl ( )
ooooOoo000O . action = IiII1iiI . action
ooooOoo000O . add_cache ( ooiIi1 )
if 75 - 75: o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1IiiI / OoO0O00
if 22 - 22: Oo0Ooo / iIii1I11I1II1 + o0oOOo0O0Ooo
IiI1iiIi1I1i = "Add"
if ( o00ooO0OOOooo0 ) :
ooooOoo000O . uptime = o00ooO0OOOooo0
ooooOoo000O . refresh_time = lisp_get_timestamp ( )
IiI1iiIi1I1i = "Replace"
if 35 - 35: OoO0O00 + II111iiii / I11i
if 45 - 45: i11iIiiIii . I1IiiI % I1Ii111 / I1ii11iIi11i
lprint ( "{} {} map-cache with {} RLOCs" . format ( IiI1iiIi1I1i ,
green ( ooooOoo000O . print_eid_tuple ( ) , False ) , len ( iiiI11II1IiIi ) ) )
if 14 - 14: IiII . OOooOOo - Oo0Ooo * oO0o
if 31 - 31: I1IiiI + OOooOOo
if 90 - 90: I1Ii111 * OOooOOo / i1IIi / iIii1I11I1II1 / OoooooooOO
if 37 - 37: O0 * I11i . O0 / II111iiii % oO0o
if 19 - 19: Ii1I - oO0o
if ( lisp_ipc_dp_socket and oOo00OoOoo != None ) :
lisp_write_ipc_keys ( oOo00OoOoo )
if 72 - 72: oO0o / I11i % II111iiii
if 22 - 22: i11iIiiIii % IiII % IiII % I11i - OoooooooOO + I1IiiI
if 31 - 31: I11i + I1ii11iIi11i . i1IIi * i11iIiiIii + I1ii11iIi11i
if 97 - 97: ooOoO0o * iIii1I11I1II1 * i1IIi * II111iiii - OOooOOo - o0oOOo0O0Ooo
if 37 - 37: II111iiii
if 27 - 27: Oo0Ooo * OoooooooOO / I1IiiI
if 43 - 43: OoO0O00
if ( IIiIIiiIIi1 ) :
oo00OO0Oooo = bold ( "RLOC-probe" , False )
for Oo0o0o0oo in ooooOoo000O . best_rloc_set :
ooOOo0o = red ( Oo0o0o0oo . rloc . print_address_no_iid ( ) , False )
lprint ( "Trigger {} to {}" . format ( oo00OO0Oooo , ooOOo0o ) )
lisp_send_map_request ( lisp_sockets , 0 , ooooOoo000O . eid , ooooOoo000O . group , Oo0o0o0oo )
if 6 - 6: Ii1I - I1Ii111 . O0 - I1IiiI
if 50 - 50: II111iiii . I1Ii111 + iII111i . OoO0O00 % I1IiiI * iII111i
if 27 - 27: OoooooooOO
return
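
#
# lisp_compute_auth
#
# Zero the Map-Register authentication field, compute an HMAC over the packet
# with the configured algorithm and the shared password, and encode the
# resulting digest back into the packet. Returns the packet unchanged when no
# authentication algorithm is configured.
#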
def lisp_compute_auth ( packet , map_register , password ) :
if ( map_register . alg_id == LISP_NONE_ALG_ID ) : return ( packet )
if 14 - 14: iIii1I11I1II1 + i11iIiiIii + o0oOOo0O0Ooo + o0oOOo0O0Ooo - IiII / I1Ii111
packet = map_register . zero_auth ( packet )
ooo000 = lisp_hash_me ( packet , map_register . alg_id , password , False )
if 70 - 70: OoooooooOO + I1IiiI / OOooOOo
if 19 - 19: I1Ii111 + i1IIi % OoooooooOO + i1IIi
if 16 - 16: I1Ii111 + II111iiii + IiII
if 34 - 34: iIii1I11I1II1 - II111iiii - ooOoO0o + oO0o
map_register . auth_data = ooo000
packet = map_register . encode_auth ( packet )
return ( packet )
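
#
# lisp_hash_me
#
# Run an HMAC over 'packet' with the supplied password, using SHA-1 or
# SHA-256 depending on alg_id (only the SHA-1-96 and SHA-256-128 algorithm
# IDs are handled). Returns a hex string when do_hex is set, otherwise the
# raw digest.
#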
def lisp_hash_me ( packet , alg_id , password , do_hex ) :
if ( alg_id == LISP_NONE_ALG_ID ) : return ( True )
if 23 - 23: Ii1I - OOooOOo
if ( alg_id == LISP_SHA_1_96_ALG_ID ) :
oO = hashlib . sha1
if 28 - 28: OoO0O00 . IiII - i1IIi * OOooOOo - I1Ii111
if ( alg_id == LISP_SHA_256_128_ALG_ID ) :
oO = hashlib . sha256
if 65 - 65: iIii1I11I1II1 / IiII / IiII
if 57 - 57: OoOoOO00 . O0 / iII111i / i11iIiiIii
if ( do_hex ) :
ooo000 = hmac . new ( password , packet , oO ) . hexdigest ( )
else :
ooo000 = hmac . new ( password , packet , oO ) . digest ( )
if 38 - 38: iII111i - Oo0Ooo / O0
return ( ooo000 )
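
#
# lisp_verify_auth
#
# Recompute the authentication hash over 'packet' and compare it with the
# value carried in the packet. Logs the mismatch and returns False when the
# two values differ.
#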
def lisp_verify_auth ( packet , alg_id , auth_data , password ) :
if ( alg_id == LISP_NONE_ALG_ID ) : return ( True )
if 53 - 53: OoooooooOO - OoOoOO00 / IiII - I1Ii111
ooo000 = lisp_hash_me ( packet , alg_id , password , True )
IiI1 = ( ooo000 == auth_data )
if 97 - 97: II111iiii . OOooOOo
if 68 - 68: IiII * IiII + oO0o / o0oOOo0O0Ooo
if 41 - 41: OoOoOO00 - O0
if 48 - 48: OoooooooOO % Ii1I * OoO0O00 / I1ii11iIi11i
if ( IiI1 == False ) :
lprint ( "Hashed value: {} does not match packet value: {}" . format ( ooo000 , auth_data ) )
if 53 - 53: ooOoO0o + oO0o - II111iiii
if 92 - 92: Oo0Ooo - I11i . ooOoO0o % oO0o
return ( IiI1 )
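
#
# lisp_retransmit_map_notify
#
# Timer callback that resends a queued Map-Notify to its ETR. When the retry
# limit is reached the entry is dequeued from the retransmit queue and its
# timer cancelled; otherwise the message is sent again and the retransmit
# timer is restarted.
#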
def lisp_retransmit_map_notify ( map_notify ) :
iIi11i1I11Ii = map_notify . etr
Iiiii = map_notify . etr_port
if 59 - 59: II111iiii * I1IiiI
if 12 - 12: i11iIiiIii - IiII . iII111i . Ii1I
if 34 - 34: i1IIi % iII111i + Oo0Ooo * OoOoOO00 + OoO0O00
if 37 - 37: I1Ii111 / OoooooooOO
if 19 - 19: Ii1I - O0 + I1IiiI + OoooooooOO + ooOoO0o - Oo0Ooo
if ( map_notify . retry_count == LISP_MAX_MAP_NOTIFY_RETRIES ) :
lprint ( "Map-Notify with nonce 0x{} retry limit reached for ETR {}" . format ( map_notify . nonce_key , red ( iIi11i1I11Ii . print_address ( ) , False ) ) )
if 45 - 45: I1IiiI . OoOoOO00 . OoOoOO00
if 20 - 20: OoOoOO00
Iiii11 = map_notify . nonce_key
if ( lisp_map_notify_queue . has_key ( Iiii11 ) ) :
map_notify . retransmit_timer . cancel ( )
lprint ( "Dequeue Map-Notify from retransmit queue, key is: {}" . format ( Iiii11 ) )
if 69 - 69: OoOoOO00 * Ii1I % ooOoO0o . OoOoOO00 / oO0o * I1Ii111
try :
lisp_map_notify_queue . pop ( Iiii11 )
except :
lprint ( "Key not found in Map-Notify queue" )
if 93 - 93: OoO0O00 % IiII % ooOoO0o . I1IiiI
if 96 - 96: II111iiii
return
if 73 - 73: II111iiii
if 81 - 81: I1IiiI + OoO0O00
o000oOOooO00 = map_notify . lisp_sockets
map_notify . retry_count += 1
if 22 - 22: OoO0O00 * OoOoOO00 * I11i * IiII . OoO0O00 . I1ii11iIi11i
lprint ( "Retransmit {} with nonce 0x{} to xTR {}, retry {}" . format ( bold ( "Map-Notify" , False ) , map_notify . nonce_key ,
red ( iIi11i1I11Ii . print_address ( ) , False ) , map_notify . retry_count ) )
if 48 - 48: i11iIiiIii / ooOoO0o . OoOoOO00 . O0 * i11iIiiIii
lisp_send_map_notify ( o000oOOooO00 , map_notify . packet , iIi11i1I11Ii , Iiiii )
if ( map_notify . site ) : map_notify . site . map_notifies_sent += 1
if 11 - 11: iIii1I11I1II1 . i1IIi . O0 / ooOoO0o
if 64 - 64: i11iIiiIii + I1IiiI / Oo0Ooo - iII111i
if 26 - 26: I1ii11iIi11i
if 67 - 67: I1Ii111 * iIii1I11I1II1 / O0 + OoO0O00 * iIii1I11I1II1 % II111iiii
map_notify . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ map_notify ] )
map_notify . retransmit_timer . start ( )
return
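
#
# lisp_send_merged_map_notify
#
# After a merged registration, build a Map-Notify that carries the merged
# RLOC-set and unicast it to each registered RLOC of the parent site-EID,
# authenticating each message with the site key and queueing it for
# retransmission.
#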
def lisp_send_merged_map_notify ( lisp_sockets , parent , map_register ,
eid_record ) :
if 75 - 75: Ii1I - OoOoOO00 . OOooOOo - o0oOOo0O0Ooo - I1ii11iIi11i
if 69 - 69: O0 % I1ii11iIi11i
if 77 - 77: iIii1I11I1II1 . OOooOOo
if 64 - 64: OoOoOO00 - i1IIi * i1IIi / iII111i * OoOoOO00 * OoO0O00
eid_record . rloc_count = len ( parent . registered_rlocs )
oOO0OoO0O = eid_record . encode ( )
eid_record . print_record ( "Merged Map-Notify " , False )
if 87 - 87: ooOoO0o
if 80 - 80: I1Ii111 . iIii1I11I1II1
if 33 - 33: OoO0O00 - I11i - Oo0Ooo
if 57 - 57: I1Ii111 % i11iIiiIii
for Iii in parent . registered_rlocs :
o00o = lisp_rloc_record ( )
o00o . store_rloc_entry ( Iii )
oOO0OoO0O += o00o . encode ( )
o00o . print_record ( " " )
del ( o00o )
if 73 - 73: Oo0Ooo - IiII / oO0o
if 90 - 90: oO0o + O0
if 35 - 35: I11i % I1Ii111
if 64 - 64: I11i + IiII - o0oOOo0O0Ooo - I11i - Oo0Ooo - Ii1I
if 9 - 9: ooOoO0o
for Iii in parent . registered_rlocs :
iIi11i1I11Ii = Iii . rloc
oO0o0ooo = lisp_map_notify ( lisp_sockets )
oO0o0ooo . record_count = 1
OoooOOo0oOO = map_register . key_id
oO0o0ooo . key_id = OoooOOo0oOO
oO0o0ooo . alg_id = map_register . alg_id
oO0o0ooo . auth_len = map_register . auth_len
oO0o0ooo . nonce = map_register . nonce
oO0o0ooo . nonce_key = lisp_hex_string ( oO0o0ooo . nonce )
oO0o0ooo . etr . copy_address ( iIi11i1I11Ii )
oO0o0ooo . etr_port = map_register . sport
oO0o0ooo . site = parent . site
oOo = oO0o0ooo . encode ( oOO0OoO0O , parent . site . auth_key [ OoooOOo0oOO ] )
oO0o0ooo . print_notify ( )
if 33 - 33: i11iIiiIii . iII111i % o0oOOo0O0Ooo
if 35 - 35: OoO0O00 + OOooOOo % II111iiii * Ii1I / OoOoOO00
if 71 - 71: OOooOOo / i1IIi
if 50 - 50: iIii1I11I1II1 * IiII
Iiii11 = oO0o0ooo . nonce_key
if ( lisp_map_notify_queue . has_key ( Iiii11 ) ) :
ooO0oI1 = lisp_map_notify_queue [ Iiii11 ]
ooO0oI1 . retransmit_timer . cancel ( )
del ( ooO0oI1 )
if 29 - 29: OOooOOo * iIii1I11I1II1 * ooOoO0o
lisp_map_notify_queue [ Iiii11 ] = oO0o0ooo
if 80 - 80: oO0o * I1Ii111
if 87 - 87: iII111i + OoOoOO00 % ooOoO0o - oO0o
if 40 - 40: i1IIi / OoOoOO00 - I11i / ooOoO0o . Ii1I
if 8 - 8: I1IiiI . IiII . OOooOOo . O0
lprint ( "Send merged Map-Notify to ETR {}" . format ( red ( iIi11i1I11Ii . print_address ( ) , False ) ) )
if 3 - 3: Ii1I + i11iIiiIii
lisp_send ( lisp_sockets , iIi11i1I11Ii , LISP_CTRL_PORT , oOo )
if 87 - 87: ooOoO0o - iII111i % I11i
parent . site . map_notifies_sent += 1
if 88 - 88: I11i . OoooooooOO
if 86 - 86: Ii1I - I1IiiI - iII111i % Ii1I . I1ii11iIi11i % i1IIi
if 84 - 84: OoOoOO00
if 99 - 99: OoO0O00 - OoOoOO00 - i1IIi / OoO0O00 * I1ii11iIi11i * iIii1I11I1II1
oO0o0ooo . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ oO0o0ooo ] )
oO0o0ooo . retransmit_timer . start ( )
if 65 - 65: iII111i - O0 / i1IIi . I1Ii111
return
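
#
# lisp_build_map_notify
#
# Build and send a Map-Notify, either to acknowledge a Map-Register or to
# report an RLOC-set change. If a notify with the same nonce is already
# pending for the source, nothing is sent. Non-ack notifies are placed on the
# retransmit queue and a retransmission timer is started.
#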
def lisp_build_map_notify ( lisp_sockets , eid_records , eid_list , record_count ,
source , port , nonce , key_id , alg_id , auth_len , site , map_register_ack ) :
if 36 - 36: OoO0O00 * I1IiiI / iII111i
Iiii11 = lisp_hex_string ( nonce ) + source . print_address ( )
if 95 - 95: Ii1I . Oo0Ooo
if 42 - 42: IiII . i1IIi % O0 * ooOoO0o - OOooOOo % ooOoO0o
if 99 - 99: i1IIi + OoOoOO00 - iII111i % II111iiii
if 6 - 6: ooOoO0o - I1Ii111 . OoOoOO00
if 64 - 64: iII111i + I1ii11iIi11i
if 88 - 88: I1Ii111 / i11iIiiIii - O0 . II111iiii / II111iiii * II111iiii
lisp_remove_eid_from_map_notify_queue ( eid_list )
if ( lisp_map_notify_queue . has_key ( Iiii11 ) ) :
oO0o0ooo = lisp_map_notify_queue [ Iiii11 ]
o00oOOO = red ( source . print_address_no_iid ( ) , False )
lprint ( "Map-Notify with nonce 0x{} pending for xTR {}" . format ( lisp_hex_string ( oO0o0ooo . nonce ) , o00oOOO ) )
if 56 - 56: Oo0Ooo / I1IiiI % I1Ii111 % I1ii11iIi11i * I1IiiI - IiII
return
if 39 - 39: oO0o + iII111i . I1Ii111 * i11iIiiIii % o0oOOo0O0Ooo + OOooOOo
if 61 - 61: ooOoO0o / I1Ii111 / I1ii11iIi11i - Ii1I % o0oOOo0O0Ooo * iII111i
oO0o0ooo = lisp_map_notify ( lisp_sockets )
oO0o0ooo . record_count = record_count
key_id = key_id
oO0o0ooo . key_id = key_id
oO0o0ooo . alg_id = alg_id
oO0o0ooo . auth_len = auth_len
oO0o0ooo . nonce = nonce
oO0o0ooo . nonce_key = lisp_hex_string ( nonce )
oO0o0ooo . etr . copy_address ( source )
oO0o0ooo . etr_port = port
oO0o0ooo . site = site
oO0o0ooo . eid_list = eid_list
if 94 - 94: I1IiiI / I11i
if 100 - 100: Ii1I % OoO0O00 % OoooooooOO / II111iiii * I1Ii111
if 64 - 64: I1Ii111 * OOooOOo * Ii1I + I1ii11iIi11i / iIii1I11I1II1 / Oo0Ooo
if 50 - 50: OOooOOo % i11iIiiIii
if ( map_register_ack == False ) :
Iiii11 = oO0o0ooo . nonce_key
lisp_map_notify_queue [ Iiii11 ] = oO0o0ooo
if 99 - 99: IiII
if 87 - 87: IiII
if ( map_register_ack ) :
lprint ( "Send Map-Notify to ack Map-Register" )
else :
lprint ( "Send Map-Notify for RLOC-set change" )
if 35 - 35: oO0o . O0 . Ii1I / ooOoO0o
if 36 - 36: i11iIiiIii . II111iiii . I11i . II111iiii
if 36 - 36: Ii1I + ooOoO0o / Oo0Ooo % Oo0Ooo
if 2 - 2: oO0o - Oo0Ooo * OoO0O00 . ooOoO0o . OOooOOo - oO0o
if 74 - 74: o0oOOo0O0Ooo
oOo = oO0o0ooo . encode ( eid_records , site . auth_key [ key_id ] )
oO0o0ooo . print_notify ( )
if 18 - 18: Oo0Ooo % OOooOOo / OOooOOo . I1IiiI + i1IIi . I1IiiI
if ( map_register_ack == False ) :
IiII1iiI = lisp_eid_record ( )
IiII1iiI . decode ( eid_records )
IiII1iiI . print_record ( " " , False )
if 3 - 3: O0 * O0 + II111iiii + OoOoOO00 * I11i % Oo0Ooo
if 19 - 19: oO0o % IiII % OoooooooOO % I1ii11iIi11i / OoO0O00
if 6 - 6: O0 * I1Ii111 - II111iiii
if 60 - 60: oO0o % oO0o
if 76 - 76: I1Ii111 / o0oOOo0O0Ooo
lisp_send_map_notify ( lisp_sockets , oOo , oO0o0ooo . etr , port )
site . map_notifies_sent += 1
if 19 - 19: O0 . i1IIi % iIii1I11I1II1 + OOooOOo * OoOoOO00 / I11i
if ( map_register_ack ) : return
if 82 - 82: I1ii11iIi11i
if 75 - 75: I11i - II111iiii
if 84 - 84: I1ii11iIi11i * IiII / I1IiiI - Ii1I + IiII - i1IIi
if 98 - 98: II111iiii - iII111i % i11iIiiIii + ooOoO0o
if 76 - 76: OOooOOo - iII111i + IiII
if 48 - 48: I1IiiI - II111iiii
oO0o0ooo . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ oO0o0ooo ] )
oO0o0ooo . retransmit_timer . start ( )
return
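
#
# lisp_send_map_notify_ack
#
# Turn a received Map-Notify into a Map-Notify-Ack, authenticate it with the
# map-server password, and send it back to the map-server.
#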
def lisp_send_map_notify_ack ( lisp_sockets , eid_records , map_notify , ms ) :
map_notify . map_notify_ack = True
if 92 - 92: Oo0Ooo / iII111i + O0 * ooOoO0o * OOooOOo % Oo0Ooo
if 97 - 97: oO0o / Ii1I
if 70 - 70: iII111i / Oo0Ooo . OoOoOO00 - II111iiii * II111iiii % I1IiiI
if 34 - 34: I1Ii111 + OOooOOo * iII111i / ooOoO0o % i11iIiiIii
oOo = map_notify . encode ( eid_records , ms . password )
map_notify . print_notify ( )
if 91 - 91: IiII * Ii1I * OOooOOo
if 17 - 17: o0oOOo0O0Ooo + Ii1I % I1ii11iIi11i + IiII % I1Ii111 + I1ii11iIi11i
if 100 - 100: I11i * OoO0O00 - i1IIi + iII111i * Ii1I - OoooooooOO
if 47 - 47: o0oOOo0O0Ooo / Ii1I - iII111i * OOooOOo / i11iIiiIii
iIi11i1I11Ii = ms . map_server
lprint ( "Send Map-Notify-Ack to {}" . format (
red ( iIi11i1I11Ii . print_address ( ) , False ) ) )
lisp_send ( lisp_sockets , iIi11i1I11Ii , LISP_CTRL_PORT , oOo )
return
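
#
# lisp_send_multicast_map_notify
#
# Send an unsolicited Map-Notify for an (S,G) site-EID to an ITR or RTR so it
# can refresh its replication state. The RLOC-records included appear to be
# either only the RTRs or only the non-RTR RLOCs, depending on whether the
# registration contains RTRs and whether the receiver is one of them. The
# notify is queued for retransmission unless one is already pending.
#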
def lisp_send_multicast_map_notify ( lisp_sockets , site_eid , eid_list , xtr ) :
if 58 - 58: iIii1I11I1II1 - OoooooooOO - iII111i
oO0o0ooo = lisp_map_notify ( lisp_sockets )
oO0o0ooo . record_count = 1
oO0o0ooo . nonce = lisp_get_control_nonce ( )
oO0o0ooo . nonce_key = lisp_hex_string ( oO0o0ooo . nonce )
oO0o0ooo . etr . copy_address ( xtr )
oO0o0ooo . etr_port = LISP_CTRL_PORT
oO0o0ooo . eid_list = eid_list
Iiii11 = oO0o0ooo . nonce_key
if 43 - 43: ooOoO0o / o0oOOo0O0Ooo
if 56 - 56: II111iiii * I1ii11iIi11i * O0 . iII111i . I1ii11iIi11i % I1Ii111
if 99 - 99: Oo0Ooo - OoO0O00 + OoooooooOO - I1Ii111 - I1ii11iIi11i % i1IIi
if 49 - 49: IiII % OoooooooOO / Oo0Ooo - OoOoOO00 + o0oOOo0O0Ooo / Ii1I
if 6 - 6: I11i % IiII
if 48 - 48: Ii1I
lisp_remove_eid_from_map_notify_queue ( oO0o0ooo . eid_list )
if ( lisp_map_notify_queue . has_key ( Iiii11 ) ) :
oO0o0ooo = lisp_map_notify_queue [ Iiii11 ]
lprint ( "Map-Notify with nonce 0x{} pending for ITR {}" . format ( oO0o0ooo . nonce , red ( xtr . print_address_no_iid ( ) , False ) ) )
if 100 - 100: OoO0O00 % I1Ii111 + OoooooooOO / OoO0O00
return
if 62 - 62: IiII
if 66 - 66: o0oOOo0O0Ooo % OOooOOo
if 15 - 15: Ii1I % IiII + IiII % iII111i - O0 * OoooooooOO
if 53 - 53: OoOoOO00 . Ii1I / Oo0Ooo
if 62 - 62: i11iIiiIii
lisp_map_notify_queue [ Iiii11 ] = oO0o0ooo
if 38 - 38: I1ii11iIi11i % ooOoO0o * OoooooooOO + iIii1I11I1II1 % i1IIi / OOooOOo
if 6 - 6: i11iIiiIii
if 8 - 8: iIii1I11I1II1 + I1ii11iIi11i . i1IIi % OoOoOO00 % OoooooooOO * Oo0Ooo
if 53 - 53: oO0o
iIIiiiiI11i = site_eid . rtrs_in_rloc_set ( )
if ( iIIiiiiI11i ) :
if ( site_eid . is_rtr_in_rloc_set ( xtr ) ) : iIIiiiiI11i = False
if 22 - 22: i11iIiiIii
if 70 - 70: OOooOOo
if 47 - 47: ooOoO0o . ooOoO0o + ooOoO0o % i11iIiiIii
if 95 - 95: ooOoO0o % i1IIi * iII111i / oO0o + i11iIiiIii
if 85 - 85: IiII . OoooooooOO / iII111i . oO0o * IiII . I1Ii111
IiII1iiI = lisp_eid_record ( )
IiII1iiI . record_ttl = 1440
IiII1iiI . eid . copy_address ( site_eid . eid )
IiII1iiI . group . copy_address ( site_eid . group )
IiII1iiI . rloc_count = 0
for O0OO0O in site_eid . registered_rlocs :
if ( iIIiiiiI11i ^ O0OO0O . is_rtr ( ) ) : continue
IiII1iiI . rloc_count += 1
if 68 - 68: OoO0O00 * i1IIi
oOo = IiII1iiI . encode ( )
if 39 - 39: OoO0O00 % OoO0O00
if 18 - 18: ooOoO0o * I1IiiI / iII111i % iII111i
if 9 - 9: i11iIiiIii % ooOoO0o % O0 + i1IIi / O0
if 12 - 12: I1Ii111 - iII111i * iII111i + OoO0O00 . Ii1I % I11i
oO0o0ooo . print_notify ( )
IiII1iiI . print_record ( " " , False )
if 28 - 28: ooOoO0o % OoO0O00 - II111iiii * IiII - I1IiiI + I1IiiI
if 84 - 84: IiII / Ii1I
if 39 - 39: OOooOOo - iIii1I11I1II1 + OoOoOO00 % IiII * OoooooooOO % Ii1I
if 11 - 11: I1ii11iIi11i
for O0OO0O in site_eid . registered_rlocs :
if ( iIIiiiiI11i ^ O0OO0O . is_rtr ( ) ) : continue
o00o = lisp_rloc_record ( )
o00o . store_rloc_entry ( O0OO0O )
oOo += o00o . encode ( )
o00o . print_record ( " " )
if 83 - 83: O0
if 97 - 97: O0
if 50 - 50: I1Ii111 / OoooooooOO . o0oOOo0O0Ooo + I1IiiI * i11iIiiIii
if 28 - 28: I1Ii111 * II111iiii
if 14 - 14: iIii1I11I1II1 / Ii1I + o0oOOo0O0Ooo . iII111i % iII111i . i1IIi
oOo = oO0o0ooo . encode ( oOo , "" )
if ( oOo == None ) : return
if 67 - 67: IiII * II111iiii + ooOoO0o - i11iIiiIii
if 15 - 15: I11i
if 67 - 67: iIii1I11I1II1
if 91 - 91: ooOoO0o
lisp_send_map_notify ( lisp_sockets , oOo , xtr , LISP_CTRL_PORT )
if 66 - 66: OOooOOo
if 5 - 5: i1IIi * OoOoOO00 + i1IIi % I11i
if 79 - 79: OOooOOo % iIii1I11I1II1 / OoOoOO00
if 9 - 9: Ii1I
oO0o0ooo . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ oO0o0ooo ] )
oO0o0ooo . retransmit_timer . start ( )
return
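
#
# lisp_queue_multicast_map_notify
#
# For each (S,G) entry in rle_list, look up the registered site-EID, build
# the set of ITRs or RTRs (or, for a (0/0, G) entry, the already-registered
# RLE nodes) that should hear about the change, and send each one a
# multicast Map-Notify.
#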
def lisp_queue_multicast_map_notify ( lisp_sockets , rle_list ) :
OO0O00Oo0o = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 76 - 76: i1IIi . i11iIiiIii . i11iIiiIii - iII111i + i11iIiiIii
for iIi1I in rle_list :
i11Iii11I = lisp_site_eid_lookup ( iIi1I [ 0 ] , iIi1I [ 1 ] , True )
if ( i11Iii11I == None ) : continue
if 89 - 89: Oo0Ooo / Ii1I * OoO0O00 + ooOoO0o
if 41 - 41: IiII + I11i * ooOoO0o + Oo0Ooo . ooOoO0o
if 38 - 38: iII111i * OoooooooOO - IiII
if 36 - 36: I1Ii111 * II111iiii + I1ii11iIi11i - iII111i * iII111i
if 91 - 91: O0 + I1Ii111 * II111iiii - O0 . i11iIiiIii . Oo0Ooo
if 54 - 54: ooOoO0o * I11i / I1ii11iIi11i % ooOoO0o
if 76 - 76: I11i . I1IiiI
oO0O0OO0oO = i11Iii11I . registered_rlocs
if ( len ( oO0O0OO0oO ) == 0 ) :
oOo0oOo = { }
for iIi1II1 in i11Iii11I . individual_registrations . values ( ) :
for O0OO0O in iIi1II1 . registered_rlocs :
if ( O0OO0O . is_rtr ( ) == False ) : continue
oOo0oOo [ O0OO0O . rloc . print_address ( ) ] = O0OO0O
if 1 - 1: I1Ii111 . IiII % oO0o . I1IiiI * II111iiii + i1IIi
if 55 - 55: OoooooooOO - Oo0Ooo / o0oOOo0O0Ooo - OoO0O00 % I1IiiI
oO0O0OO0oO = oOo0oOo . values ( )
if 23 - 23: OOooOOo
if 97 - 97: Oo0Ooo / OoooooooOO . OoooooooOO
if 47 - 47: OoO0O00
if 52 - 52: I1IiiI * iIii1I11I1II1 % oO0o * IiII % oO0o
if 9 - 9: I11i
if 83 - 83: i11iIiiIii
OOoo0oO = [ ]
O0O0 = False
if ( i11Iii11I . eid . address == 0 and i11Iii11I . eid . mask_len == 0 ) :
I1III1iI = [ ]
iIIiIIiii1iI = [ ] if len ( oO0O0OO0oO ) == 0 else oO0O0OO0oO [ 0 ] . rle . rle_nodes
if 61 - 61: oO0o . o0oOOo0O0Ooo
for I1I1iiI in iIIiIIiii1iI :
OOoo0oO . append ( I1I1iiI . address )
I1III1iI . append ( I1I1iiI . address . print_address_no_iid ( ) )
if 82 - 82: Oo0Ooo * OoooooooOO / ooOoO0o / I1IiiI
lprint ( "Notify existing RLE-nodes {}" . format ( I1III1iI ) )
else :
if 70 - 70: I1IiiI
if 74 - 74: ooOoO0o * II111iiii
if 96 - 96: i11iIiiIii . I1IiiI - II111iiii . I11i
if 79 - 79: OoO0O00 . OoOoOO00 - i1IIi + Ii1I * i11iIiiIii . OoooooooOO
if 83 - 83: o0oOOo0O0Ooo / oO0o
for O0OO0O in oO0O0OO0oO :
if ( O0OO0O . is_rtr ( ) ) : OOoo0oO . append ( O0OO0O . rloc )
if 24 - 24: Ii1I + oO0o / OoooooooOO % i11iIiiIii
if 1 - 1: iII111i / I1Ii111 * I1IiiI + OoOoOO00 . OoooooooOO
if 5 - 5: I1IiiI
if 74 - 74: i1IIi * Oo0Ooo - OoOoOO00 * o0oOOo0O0Ooo
if 85 - 85: iIii1I11I1II1 * IiII / i11iIiiIii - ooOoO0o - o0oOOo0O0Ooo
O0O0 = ( len ( OOoo0oO ) != 0 )
if ( O0O0 == False ) :
ooO00oO0O = lisp_site_eid_lookup ( iIi1I [ 0 ] , OO0O00Oo0o , False )
if ( ooO00oO0O == None ) : continue
if 30 - 30: OoOoOO00 - OOooOOo . Oo0Ooo
for O0OO0O in ooO00oO0O . registered_rlocs :
if ( O0OO0O . rloc . is_null ( ) ) : continue
OOoo0oO . append ( O0OO0O . rloc )
if 11 - 11: IiII - I1Ii111 - OoO0O00 * o0oOOo0O0Ooo
if 99 - 99: O0 - OoO0O00
if 95 - 95: Ii1I . IiII * o0oOOo0O0Ooo
if 91 - 91: I1Ii111
if 49 - 49: I11i
if 17 - 17: Oo0Ooo % o0oOOo0O0Ooo
if ( len ( OOoo0oO ) == 0 ) :
lprint ( "No ITRs or RTRs found for {}, Map-Notify suppressed" . format ( green ( i11Iii11I . print_eid_tuple ( ) , False ) ) )
if 3 - 3: OoO0O00 . oO0o . oO0o . Ii1I
continue
if 100 - 100: i11iIiiIii / i1IIi . I1ii11iIi11i
if 1 - 1: IiII * I1Ii111 / I1ii11iIi11i * i11iIiiIii
if 82 - 82: o0oOOo0O0Ooo * OoO0O00 / o0oOOo0O0Ooo % OoOoOO00 * iIii1I11I1II1 % O0
if 10 - 10: ooOoO0o
if 69 - 69: I11i + I1IiiI / oO0o
if 89 - 89: i1IIi % OoOoOO00 . I1ii11iIi11i
for Iii in OOoo0oO :
lprint ( "Build Map-Notify to {}TR {} for {}" . format ( "R" if O0O0 else "x" , red ( Iii . print_address_no_iid ( ) , False ) ,
green ( i11Iii11I . print_eid_tuple ( ) , False ) ) )
if 50 - 50: I1IiiI * ooOoO0o
IIIo000 = [ i11Iii11I . print_eid_tuple ( ) ]
lisp_send_multicast_map_notify ( lisp_sockets , i11Iii11I , IIIo000 , Iii )
time . sleep ( .001 )
if 39 - 39: OoOoOO00
if 61 - 61: OoooooooOO / ooOoO0o . i1IIi . Oo0Ooo % OoOoOO00 * OoO0O00
return
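
#
# lisp_find_sig_in_rloc_set
#
# Walk the RLOC-records of an EID-record and return the first one whose JSON
# payload contains a "signature" key, or None when no signature is present.
#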
def lisp_find_sig_in_rloc_set ( packet , rloc_count ) :
for II11iIII1i1I in range ( rloc_count ) :
o00o = lisp_rloc_record ( )
packet = o00o . decode ( packet , None )
OoO0OOOO = o00o . json
if ( OoO0OOOO == None ) : continue
if 90 - 90: Oo0Ooo . II111iiii + I1ii11iIi11i - OoOoOO00 / I11i * iII111i
try :
OoO0OOOO = json . loads ( OoO0OOOO . json_string )
except :
lprint ( "Found corrupted JSON signature" )
continue
if 58 - 58: oO0o + Oo0Ooo . O0
if 8 - 8: II111iiii + iII111i + OoO0O00 - Ii1I / I1ii11iIi11i
if ( OoO0OOOO . has_key ( "signature" ) == False ) : continue
return ( o00o )
if 86 - 86: I1ii11iIi11i
return ( None )
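
#
# lisp_get_eid_hash
#
# If 'eid' falls under one of the configured crypto-EID prefixes, return the
# low-order hash bits of the address (the bits below the prefix mask length)
# as a colon-separated hex string; otherwise return None.
#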
def lisp_get_eid_hash ( eid ) :
OoOOo = None
for iII in lisp_eid_hashes :
if 51 - 51: Ii1I + IiII * o0oOOo0O0Ooo / I1IiiI . I1ii11iIi11i + I1ii11iIi11i
if 37 - 37: II111iiii - ooOoO0o / Oo0Ooo * iIii1I11I1II1 . II111iiii % I1Ii111
if 28 - 28: i11iIiiIii + OoO0O00 % O0 - I1ii11iIi11i % oO0o
if 30 - 30: I11i + OOooOOo
II1 = iII . instance_id
if ( II1 == - 1 ) : iII . instance_id = eid . instance_id
if 27 - 27: OoOoOO00 . ooOoO0o
ooooOOoO = eid . is_more_specific ( iII )
iII . instance_id = II1
if ( ooooOOoO ) :
OoOOo = 128 - iII . mask_len
break
if 8 - 8: ooOoO0o % o0oOOo0O0Ooo
if 22 - 22: O0 * IiII . OoO0O00
if ( OoOOo == None ) : return ( None )
if 63 - 63: oO0o % Oo0Ooo * OoO0O00 / II111iiii / Ii1I - ooOoO0o
Iiii1Ii1I = eid . address
i1I = ""
for II11iIII1i1I in range ( 0 , OoOOo / 16 ) :
iIiIi1iI11iiI = Iiii1Ii1I & 0xffff
iIiIi1iI11iiI = hex ( iIiIi1iI11iiI ) [ 2 : - 1 ]
i1I = iIiIi1iI11iiI . zfill ( 4 ) + ":" + i1I
Iiii1Ii1I >>= 16
if 53 - 53: OoOoOO00 + oO0o
if ( OoOOo % 16 != 0 ) :
iIiIi1iI11iiI = Iiii1Ii1I & 0xff
iIiIi1iI11iiI = hex ( iIiIi1iI11iiI ) [ 2 : - 1 ]
i1I = iIiIi1iI11iiI . zfill ( 2 ) + ":" + i1I
if 80 - 80: oO0o / OoOoOO00 - I11i / oO0o - iII111i - OoooooooOO
return ( i1I [ 0 : - 1 ] )
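
#
# lisp_lookup_public_key
#
# Derive the "hash-..." distinguished-name EID for a crypto-EID, look it up
# in the site cache, and extract the "public-key" value from its registered
# RLOC-records. Returns [hash-eid, public-key, lookup-succeeded].
#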
def lisp_lookup_public_key ( eid ) :
II1 = eid . instance_id
if 52 - 52: Ii1I * oO0o / I1Ii111 . IiII
if 84 - 84: OoooooooOO - oO0o - I1Ii111
if 69 - 69: OoOoOO00 * Ii1I % OoooooooOO % OOooOOo * OoOoOO00
if 20 - 20: IiII
if 17 - 17: o0oOOo0O0Ooo % iIii1I11I1II1
ooo0oOo = lisp_get_eid_hash ( eid )
if ( ooo0oOo == None ) : return ( [ None , None , False ] )
if 79 - 79: I11i
ooo0oOo = "hash-" + ooo0oOo
IIIIIiI = lisp_address ( LISP_AFI_NAME , ooo0oOo , len ( ooo0oOo ) , II1 )
i1i11Ii1 = lisp_address ( LISP_AFI_NONE , "" , 0 , II1 )
if 38 - 38: I1ii11iIi11i * ooOoO0o
if 77 - 77: OOooOOo - i11iIiiIii - I1ii11iIi11i
if 94 - 94: OoO0O00 % iII111i - I1Ii111 + OoO0O00 - I1IiiI
if 65 - 65: OOooOOo
ooO00oO0O = lisp_site_eid_lookup ( IIIIIiI , i1i11Ii1 , True )
if ( ooO00oO0O == None ) : return ( [ IIIIIiI , None , False ] )
if 90 - 90: O0
if 91 - 91: O0 * OoOoOO00 - OoOoOO00 * II111iiii - iII111i
if 38 - 38: oO0o * I11i % OOooOOo
if 80 - 80: O0 % II111iiii / O0 . Oo0Ooo * OoOoOO00 + OOooOOo
oooo0 = None
for Oo0o0o0oo in ooO00oO0O . registered_rlocs :
i11IIiiII = Oo0o0o0oo . json
if ( i11IIiiII == None ) : continue
try :
i11IIiiII = json . loads ( i11IIiiII . json_string )
except :
lprint ( "Registered RLOC JSON format is invalid for {}" . format ( ooo0oOo ) )
if 31 - 31: OoO0O00 + i11iIiiIii / I11i % O0 / Ii1I
return ( [ IIIIIiI , None , False ] )
if 90 - 90: iIii1I11I1II1 % oO0o % IiII
if ( i11IIiiII . has_key ( "public-key" ) == False ) : continue
oooo0 = i11IIiiII [ "public-key" ]
break
if 84 - 84: I1IiiI * IiII * iII111i / i1IIi . II111iiii * o0oOOo0O0Ooo
return ( [ IIIIIiI , oooo0 , True ] )
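
#
# lisp_verify_cga_sig
#
# Verify the ECDSA signature carried in an RLOC-record against the
# signature-EID's registered public key. Returns True only when the public
# key can be found and parsed and the signature verifies over the printed
# EID string.
#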
def lisp_verify_cga_sig ( eid , rloc_record ) :
if 26 - 26: i1IIi * I1Ii111 * OoO0O00 - IiII
if 26 - 26: Oo0Ooo - ooOoO0o . iII111i * OoOoOO00 / OoooooooOO
if 66 - 66: I1IiiI
if 45 - 45: II111iiii * I1Ii111 - II111iiii / I1IiiI % oO0o
if 83 - 83: oO0o % OoO0O00 + I1ii11iIi11i / OoooooooOO % iII111i
o0o000OOO = json . loads ( rloc_record . json . json_string )
if 22 - 22: I1Ii111
if ( lisp_get_eid_hash ( eid ) ) :
oo0o0Oo = eid
elif ( o0o000OOO . has_key ( "signature-eid" ) ) :
iii11ii11IIii = o0o000OOO [ "signature-eid" ]
oo0o0Oo = lisp_address ( LISP_AFI_IPV6 , iii11ii11IIii , 0 , 0 )
else :
lprint ( " No signature-eid found in RLOC-record" )
return ( False )
if 45 - 45: OoO0O00
if 31 - 31: I1IiiI . O0 % Ii1I . oO0o
if 91 - 91: O0 - oO0o * O0
if 98 - 98: Ii1I
if 54 - 54: oO0o
IIIIIiI , oooo0 , oO0O00oo0O = lisp_lookup_public_key ( oo0o0Oo )
if ( IIIIIiI == None ) :
oO00oo000O = green ( oo0o0Oo . print_address ( ) , False )
lprint ( " Could not parse hash in EID {}" . format ( oO00oo000O ) )
return ( False )
if 73 - 73: OoOoOO00
if 47 - 47: oO0o
iIIi11Ii1iII = "found" if oO0O00oo0O else bold ( "not found" , False )
oO00oo000O = green ( IIIIIiI . print_address ( ) , False )
lprint ( " Lookup for crypto-hashed EID {} {}" . format ( oO00oo000O , iIIi11Ii1iII ) )
if ( oO0O00oo0O == False ) : return ( False )
if 72 - 72: I11i % ooOoO0o / O0 . O0
if ( oooo0 == None ) :
lprint ( " RLOC-record with public-key not found" )
return ( False )
if 7 - 7: O0 * I1ii11iIi11i + Ii1I + oO0o % oO0o
if 47 - 47: oO0o * I1ii11iIi11i
OoOOoo00ooOoo = oooo0 [ 0 : 8 ] + "..." + oooo0 [ - 8 : : ]
lprint ( " RLOC-record with public-key '{}' found" . format ( OoOOoo00ooOoo ) )
if 92 - 92: O0 % I1IiiI / OOooOOo
if 43 - 43: I11i - I11i
if 27 - 27: Ii1I / o0oOOo0O0Ooo . iIii1I11I1II1 . I1IiiI - OoO0O00
if 28 - 28: ooOoO0o
if 88 - 88: oO0o
o0o0Oo = o0o000OOO [ "signature" ]
if 76 - 76: OoOoOO00 / iII111i * ooOoO0o . i1IIi
try :
o0o000OOO = binascii . a2b_base64 ( o0o0Oo )
except :
lprint ( " Incorrect padding in signature string" )
return ( False )
if 28 - 28: I11i . I1ii11iIi11i
if 80 - 80: OoO0O00 - OoooooooOO * i11iIiiIii
iII1i11i1i1II = len ( o0o000OOO )
if ( iII1i11i1i1II & 1 ) :
lprint ( " Signature length is odd, length {}" . format ( iII1i11i1i1II ) )
return ( False )
if 19 - 19: I11i - IiII - i11iIiiIii % Ii1I + oO0o
if 37 - 37: i1IIi + O0 . iIii1I11I1II1 + OOooOOo
if 42 - 42: OOooOOo * ooOoO0o * i11iIiiIii + OoooooooOO . iIii1I11I1II1
if 95 - 95: i1IIi * O0 / II111iiii * OoOoOO00 * I1IiiI
if 38 - 38: OOooOOo - OoOoOO00 / OoO0O00 / o0oOOo0O0Ooo - i11iIiiIii
oOooOOoO = oo0o0Oo . print_address ( )
if 4 - 4: I1IiiI * o0oOOo0O0Ooo - I11i - OoooooooOO . OoooooooOO
if 79 - 79: oO0o - iII111i
if 34 - 34: OoooooooOO + Ii1I - iII111i + OoooooooOO / I1IiiI
if 39 - 39: o0oOOo0O0Ooo . i1IIi * OoO0O00 / II111iiii / I1ii11iIi11i * OOooOOo
oooo0 = binascii . a2b_base64 ( oooo0 )
try :
Iiii11 = ecdsa . VerifyingKey . from_pem ( oooo0 )
except :
iiO0 = bold ( "Bad public-key" , False )
lprint ( " {}, not in PEM format" . format ( iiO0 ) )
return ( False )
try :
O0OoOOo0o = Iiii11 . verify ( o0o000OOO , oOooOOoO , hashfunc = hashlib . sha256 )
except :
lprint ( " Signature library failed for signature data '{}'" . format ( oOooOOoO ) )
if 72 - 72: I1Ii111
lprint ( " Signature used '{}'" . format ( o0o0Oo ) )
return ( False )
if 6 - 6: II111iiii - i1IIi
return ( O0OoOOo0o )
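
#
# lisp_remove_eid_from_map_notify_queue
#
# Cancel and dequeue any pending Map-Notify retransmissions whose EID-list
# overlaps the supplied EIDs, presumably because a newer Map-Notify for
# those EIDs is about to be queued.
#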
def lisp_remove_eid_from_map_notify_queue ( eid_list ) :
if 23 - 23: iIii1I11I1II1 + OoooooooOO + ooOoO0o . iII111i . Oo0Ooo - iIii1I11I1II1
if 25 - 25: O0 + I1IiiI % OOooOOo / Oo0Ooo . IiII / I1Ii111
if 84 - 84: ooOoO0o . O0 + I1IiiI * OoO0O00 - I1IiiI
if 24 - 24: Ii1I
if 23 - 23: Oo0Ooo * i1IIi / I1IiiI . I11i - I1ii11iIi11i . iIii1I11I1II1
iiiIIi = [ ]
for IiiI11I111I in eid_list :
for OOOo in lisp_map_notify_queue :
oO0o0ooo = lisp_map_notify_queue [ OOOo ]
if ( IiiI11I111I not in oO0o0ooo . eid_list ) : continue
if 1 - 1: oO0o + oO0o - OoO0O00
iiiIIi . append ( OOOo )
I1i1i1Ii1II1 = oO0o0ooo . retransmit_timer
if ( I1i1i1Ii1II1 ) : I1i1i1Ii1II1 . cancel ( )
if 29 - 29: Oo0Ooo
lprint ( "Remove from Map-Notify queue nonce 0x{} for EID {}" . format ( oO0o0ooo . nonce_key , green ( IiiI11I111I , False ) ) )
if 16 - 16: oO0o
if 52 - 52: I11i * I1IiiI % I11i - iII111i - Ii1I - OoooooooOO
if 15 - 15: iII111i
if 95 - 95: i11iIiiIii . Ii1I / II111iiii + II111iiii + Ii1I / I11i
if 72 - 72: I1Ii111 . I1Ii111 * O0 + I1ii11iIi11i / Oo0Ooo
if 96 - 96: oO0o . ooOoO0o * Oo0Ooo % ooOoO0o + I1Ii111 + iIii1I11I1II1
if 45 - 45: II111iiii
for OOOo in iiiIIi : lisp_map_notify_queue . pop ( OOOo )
return
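
#
# lisp_decrypt_map_register
#
# If the Map-Register header has the encryption bit set, look up the
# map-server encryption key selected by the key-id field and ChaCha-decrypt
# the packet body, returning the cleartext packet (or None when no key is
# configured for that key-id).
#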
def lisp_decrypt_map_register ( packet ) :
if 41 - 41: II111iiii + Oo0Ooo - IiII / I1Ii111 - OOooOOo . oO0o
if 100 - 100: ooOoO0o / I1ii11iIi11i * OoOoOO00 . I1ii11iIi11i . o0oOOo0O0Ooo * iIii1I11I1II1
if 15 - 15: iII111i + o0oOOo0O0Ooo / IiII
if 33 - 33: OoooooooOO . IiII * o0oOOo0O0Ooo
if 41 - 41: Ii1I . iII111i . o0oOOo0O0Ooo % OoooooooOO % IiII
oooooOOo0Oo = socket . ntohl ( struct . unpack ( "I" , packet [ 0 : 4 ] ) [ 0 ] )
O0OoOoo0ooOO = ( oooooOOo0Oo >> 13 ) & 0x1
if ( O0OoOoo0ooOO == 0 ) : return ( packet )
if 2 - 2: OoOoOO00 - iIii1I11I1II1 + O0 % iIii1I11I1II1 * i11iIiiIii
iIIoo0o0O0ooO0O = ( oooooOOo0Oo >> 14 ) & 0x7
if 56 - 56: OOooOOo . oO0o
if 75 - 75: oO0o + OoOoOO00 - OoooooooOO
if 38 - 38: I11i / ooOoO0o / OoOoOO00 * OOooOOo . oO0o
if 8 - 8: OoO0O00 . OOooOOo % I1Ii111 * OOooOOo / I1IiiI
try :
i1iIII1i = lisp_ms_encryption_keys [ iIIoo0o0O0ooO0O ]
i1iIII1i = i1iIII1i . zfill ( 32 )
Ii1IiiiI1ii = "0" * 8
except :
lprint ( "Cannot decrypt Map-Register with key-id {}" . format ( iIIoo0o0O0ooO0O ) )
return ( None )
if 64 - 64: iII111i * I1ii11iIi11i - OoOoOO00
if 1 - 1: i1IIi / OoO0O00 % i1IIi % i11iIiiIii / i1IIi
i1 = bold ( "Decrypt" , False )
lprint ( "{} Map-Register with key-id {}" . format ( i1 , iIIoo0o0O0ooO0O ) )
if 8 - 8: O0 / OOooOOo + iII111i % iIii1I11I1II1 % iIii1I11I1II1 . ooOoO0o
IiIi = chacha . ChaCha ( i1iIII1i , Ii1IiiiI1ii ) . decrypt ( packet [ 4 : : ] )
return ( packet [ 0 : 4 ] + IiIi )
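
#
# lisp_process_map_register
#
# Map-Server processing of a received Map-Register: decrypt and decode the
# packet, find (or dynamically create) the matching site-EID for each
# EID-record, verify the authentication data and any required
# EID-crypto-hash signature, store the registered RLOC-set, and send the
# Map-Notify messages that follow from the registration (merged notifies,
# RLOC-set-change notifies to the old RLOC-set, subscriber notifies,
# multicast notifies, and the Map-Register ack).
#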
def lisp_process_map_register ( lisp_sockets , packet , source , sport ) :
global lisp_registered_count
if 21 - 21: I1Ii111 . i1IIi - iII111i % I1ii11iIi11i . OOooOOo
if 52 - 52: Ii1I * I1ii11iIi11i
if 21 - 21: I1IiiI . i11iIiiIii - o0oOOo0O0Ooo * II111iiii % iIii1I11I1II1
if 9 - 9: I1ii11iIi11i + I11i
if 20 - 20: iII111i + i1IIi / oO0o % OoooooooOO * OoOoOO00
if 70 - 70: Oo0Ooo - OOooOOo * OOooOOo / o0oOOo0O0Ooo
packet = lisp_decrypt_map_register ( packet )
if ( packet == None ) : return
if 4 - 4: OoOoOO00 / OoO0O00
o0OOO = lisp_map_register ( )
oOO , packet = o0OOO . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Register packet" )
return
if 16 - 16: i11iIiiIii - I1Ii111 . OOooOOo
o0OOO . sport = sport
if 54 - 54: I1IiiI
o0OOO . print_map_register ( )
if 4 - 4: o0oOOo0O0Ooo + o0oOOo0O0Ooo / oO0o / i1IIi
if 65 - 65: i1IIi - Ii1I
if 6 - 6: ooOoO0o . OoO0O00 / O0 * OoO0O00
if 35 - 35: Ii1I / I11i - ooOoO0o / OoooooooOO
II1iI111i11 = True
if ( o0OOO . auth_len == LISP_SHA1_160_AUTH_DATA_LEN ) :
II1iI111i11 = True
if 39 - 39: I1ii11iIi11i . i11iIiiIii + I11i . O0
if ( o0OOO . alg_id == LISP_SHA_256_128_ALG_ID ) :
II1iI111i11 = False
if 16 - 16: II111iiii . ooOoO0o . i11iIiiIii * Ii1I - o0oOOo0O0Ooo . I1IiiI
if 33 - 33: o0oOOo0O0Ooo % ooOoO0o
if 43 - 43: I1Ii111
if 81 - 81: OoOoOO00
if 97 - 97: OoO0O00
OoooO0oOO = [ ]
if 8 - 8: oO0o - OoO0O00 * I1Ii111
if 25 - 25: iII111i % OoO0O00
if 9 - 9: i1IIi / OoOoOO00 + o0oOOo0O0Ooo + OOooOOo - I1IiiI / i1IIi
if 8 - 8: o0oOOo0O0Ooo * OoO0O00 % IiII / OoooooooOO * ooOoO0o - i11iIiiIii
iIoo = None
I1IIiIiii = packet
oooOoooOO0Oo0 = [ ]
OOo00oOOo0OOO = o0OOO . record_count
for II11iIII1i1I in range ( OOo00oOOo0OOO ) :
IiII1iiI = lisp_eid_record ( )
o00o = lisp_rloc_record ( )
packet = IiII1iiI . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode EID-record in Map-Register packet" )
return
if 32 - 32: OoO0O00 . oO0o * I1Ii111 - OoOoOO00 . o0oOOo0O0Ooo % oO0o
IiII1iiI . print_record ( " " , False )
if 21 - 21: iIii1I11I1II1
if 31 - 31: OoOoOO00
if 37 - 37: i11iIiiIii + IiII
if 41 - 41: OoOoOO00 + i1IIi - iIii1I11I1II1
ooO00oO0O = lisp_site_eid_lookup ( IiII1iiI . eid , IiII1iiI . group ,
False )
if 8 - 8: I1Ii111
II = ooO00oO0O . print_eid_tuple ( ) if ooO00oO0O else None
if 6 - 6: OoOoOO00 * oO0o - II111iiii * OoO0O00 . I11i % I1Ii111
if 83 - 83: OOooOOo * I1IiiI . ooOoO0o
if 45 - 45: OoooooooOO % Oo0Ooo / oO0o
if 71 - 71: O0
if 22 - 22: iII111i * ooOoO0o * I1IiiI / II111iiii % Ii1I
if 39 - 39: OoooooooOO % i11iIiiIii
if 20 - 20: iII111i - I11i / I1ii11iIi11i * O0 + IiII % I11i
if ( ooO00oO0O and ooO00oO0O . accept_more_specifics == False ) :
if ( ooO00oO0O . eid_record_matches ( IiII1iiI ) == False ) :
OOooOo00Ooo = ooO00oO0O . parent_for_more_specifics
if ( OOooOo00Ooo ) : ooO00oO0O = OOooOo00Ooo
oO00OO00o = ( ooO00oO0O and ooO00oO0O . accept_more_specifics )
if ( oO00OO00o ) :
OOOoo0 = lisp_site_eid ( ooO00oO0O . site )
OOOoo0 . dynamic = True
OOOoo0 . eid . copy_address ( IiII1iiI . eid )
OOOoo0 . group . copy_address ( IiII1iiI . group )
OOOoo0 . parent_for_more_specifics = ooO00oO0O
OOOoo0 . add_cache ( )
OOOoo0 . inherit_from_ams_parent ( )
ooO00oO0O . more_specific_registrations . append ( OOOoo0 )
ooO00oO0O = OOOoo0
else :
ooO00oO0O = lisp_site_eid_lookup ( IiII1iiI . eid , IiII1iiI . group ,
True )
if 39 - 39: OoO0O00 - o0oOOo0O0Ooo
if 100 - 100: o0oOOo0O0Ooo * OoO0O00 + I1ii11iIi11i
oO00oo000O = IiII1iiI . print_eid_tuple ( )
if 8 - 8: OOooOOo . i11iIiiIii / oO0o % OOooOOo - II111iiii % II111iiii
if ( ooO00oO0O == None ) :
iIo0OO0O000 = bold ( "Site not found" , False )
lprint ( " {} for EID {}{}" . format ( iIo0OO0O000 , green ( oO00oo000O , False ) ,
", matched non-ams {}" . format ( green ( II , False ) if II else "" ) ) )
if 46 - 46: II111iiii + OoOoOO00 % OoO0O00
if 7 - 7: oO0o + II111iiii - O0
if 32 - 32: oO0o
if 62 - 62: i11iIiiIii + OoooooooOO + IiII - OoO0O00 / oO0o * iIii1I11I1II1
if 91 - 91: o0oOOo0O0Ooo - i11iIiiIii + Oo0Ooo % iIii1I11I1II1
packet = o00o . end_of_rlocs ( packet , IiII1iiI . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 58 - 58: iII111i / ooOoO0o - I1Ii111 + I1Ii111 * ooOoO0o
continue
if 48 - 48: iII111i % O0 % Ii1I * OoO0O00 . OoO0O00
if 74 - 74: OoO0O00 * i1IIi + I1ii11iIi11i / o0oOOo0O0Ooo / i1IIi
iIoo = ooO00oO0O . site
if 94 - 94: Ii1I
if ( oO00OO00o ) :
Oo0ooo0Ooo = ooO00oO0O . parent_for_more_specifics . print_eid_tuple ( )
lprint ( " Found ams {} for site '{}' for registering prefix {}" . format ( green ( Oo0ooo0Ooo , False ) , iIoo . site_name , green ( oO00oo000O , False ) ) )
if 13 - 13: OoO0O00 - II111iiii . iII111i + OoOoOO00 / i11iIiiIii
else :
Oo0ooo0Ooo = green ( ooO00oO0O . print_eid_tuple ( ) , False )
lprint ( " Found {} for site '{}' for registering prefix {}" . format ( Oo0ooo0Ooo , iIoo . site_name , green ( oO00oo000O , False ) ) )
if 32 - 32: ooOoO0o / II111iiii / I1ii11iIi11i
if 34 - 34: iIii1I11I1II1
if 47 - 47: OOooOOo * iII111i
if 71 - 71: IiII - OoooooooOO * i11iIiiIii . OoooooooOO % i1IIi . Oo0Ooo
if 3 - 3: OoO0O00 + i11iIiiIii + oO0o * IiII
if 19 - 19: iII111i / II111iiii . I1Ii111 * I1IiiI - OOooOOo
if ( iIoo . shutdown ) :
lprint ( ( " Rejecting registration for site '{}', configured in " +
"admin-shutdown state" ) . format ( iIoo . site_name ) )
packet = o00o . end_of_rlocs ( packet , IiII1iiI . rloc_count )
continue
OoooOOo0oOO = o0OOO . key_id
if ( iIoo . auth_key . has_key ( OoooOOo0oOO ) == False ) : OoooOOo0oOO = 0
O0Ooo0 = iIoo . auth_key [ OoooOOo0oOO ]
if 95 - 95: I11i - oO0o - OOooOOo * ooOoO0o % I1IiiI
oO0O = lisp_verify_auth ( oOO , o0OOO . alg_id ,
o0OOO . auth_data , O0Ooo0 )
O0OO0o000o00 = "dynamic " if ooO00oO0O . dynamic else ""
if 85 - 85: Ii1I % OoOoOO00
O0O0oooo = bold ( "passed" if oO0O else "failed" , False )
OoooOOo0oOO = "key-id {}" . format ( OoooOOo0oOO ) if OoooOOo0oOO == o0OOO . key_id else "bad key-id {}" . format ( o0OOO . key_id )
if 28 - 28: IiII
lprint ( " Authentication {} for {}EID-prefix {}, {}" . format ( O0O0oooo , O0OO0o000o00 , green ( oO00oo000O , False ) , OoooOOo0oOO ) )
if 32 - 32: IiII * II111iiii . Ii1I
if 68 - 68: I11i / O0
if 6 - 6: oO0o - oO0o . I1IiiI % I1ii11iIi11i
if 22 - 22: Ii1I / I1IiiI / II111iiii
if 31 - 31: II111iiii - Ii1I * OOooOOo - i11iIiiIii / OoooooooOO - I1Ii111
if 76 - 76: Oo0Ooo
Oo0Oooo0 = True
OoO0 = ( lisp_get_eid_hash ( IiII1iiI . eid ) != None )
if ( OoO0 or ooO00oO0O . require_signature ) :
I1II1111iI = "Required " if ooO00oO0O . require_signature else ""
oO00oo000O = green ( oO00oo000O , False )
Oo0o0o0oo = lisp_find_sig_in_rloc_set ( packet , IiII1iiI . rloc_count )
if ( Oo0o0o0oo == None ) :
Oo0Oooo0 = False
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}, no signature found" ) . format ( I1II1111iI ,
bold ( "failed" , False ) , oO00oo000O ) )
else :
Oo0Oooo0 = lisp_verify_cga_sig ( IiII1iiI . eid , Oo0o0o0oo )
O0O0oooo = bold ( "passed" if Oo0Oooo0 else "failed" , False )
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}" ) . format ( I1II1111iI , O0O0oooo , oO00oo000O ) )
if 23 - 23: II111iiii - O0
if 58 - 58: o0oOOo0O0Ooo * OoO0O00 + OoO0O00
if 93 - 93: IiII - I1ii11iIi11i % I11i + i1IIi % OoO0O00
if 20 - 20: oO0o . Oo0Ooo + IiII - II111iiii % Ii1I
if ( oO0O == False or Oo0Oooo0 == False ) :
packet = o00o . end_of_rlocs ( packet , IiII1iiI . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 64 - 64: Ii1I % OoO0O00 + OOooOOo % OoOoOO00 + IiII
continue
if 92 - 92: iII111i * Oo0Ooo - OoOoOO00
if 33 - 33: i11iIiiIii - OoOoOO00 . OOooOOo * II111iiii . Ii1I
if 59 - 59: OoOoOO00
if 29 - 29: iII111i - II111iiii * OoooooooOO * OoooooooOO
if 15 - 15: IiII / OOooOOo / iIii1I11I1II1 / OoOoOO00
if 91 - 91: i11iIiiIii % O0 . Oo0Ooo / I1Ii111
if ( o0OOO . merge_register_requested ) :
OOooOo00Ooo = ooO00oO0O
OOooOo00Ooo . inconsistent_registration = False
if 62 - 62: Oo0Ooo . II111iiii % OoO0O00 . Ii1I * OOooOOo + II111iiii
if 7 - 7: OOooOOo
if 22 - 22: Oo0Ooo + ooOoO0o
if 71 - 71: OOooOOo . Ii1I * i11iIiiIii . I11i
if 9 - 9: O0 / I1ii11iIi11i . iII111i . O0 + IiII % I11i
if ( ooO00oO0O . group . is_null ( ) ) :
if ( OOooOo00Ooo . site_id != o0OOO . site_id ) :
OOooOo00Ooo . site_id = o0OOO . site_id
OOooOo00Ooo . registered = False
OOooOo00Ooo . individual_registrations = { }
OOooOo00Ooo . registered_rlocs = [ ]
lisp_registered_count -= 1
if 27 - 27: i11iIiiIii - I1ii11iIi11i / O0 - i1IIi + I1IiiI * iII111i
if 26 - 26: Oo0Ooo . Ii1I
if 7 - 7: OoOoOO00 - o0oOOo0O0Ooo + oO0o
Iiii11 = source . address + o0OOO . xtr_id
if ( ooO00oO0O . individual_registrations . has_key ( Iiii11 ) ) :
ooO00oO0O = ooO00oO0O . individual_registrations [ Iiii11 ]
else :
ooO00oO0O = lisp_site_eid ( iIoo )
ooO00oO0O . eid . copy_address ( OOooOo00Ooo . eid )
ooO00oO0O . group . copy_address ( OOooOo00Ooo . group )
OOooOo00Ooo . individual_registrations [ Iiii11 ] = ooO00oO0O
if 8 - 8: iIii1I11I1II1
else :
ooO00oO0O . inconsistent_registration = ooO00oO0O . merge_register_requested
if 6 - 6: oO0o
if 51 - 51: I1Ii111 - o0oOOo0O0Ooo
if 5 - 5: O0
ooO00oO0O . map_registers_received += 1
if 7 - 7: OoOoOO00 + OoO0O00 * I1IiiI
if 63 - 63: I1ii11iIi11i + iII111i * i1IIi
if 63 - 63: I1ii11iIi11i / II111iiii % oO0o + ooOoO0o . Ii1I % I11i
if 59 - 59: I1Ii111 % o0oOOo0O0Ooo - I1IiiI * i1IIi
if 5 - 5: I1IiiI
iiO0 = ( ooO00oO0O . is_rloc_in_rloc_set ( source ) == False )
if ( IiII1iiI . record_ttl == 0 and iiO0 ) :
lprint ( " Ignore deregistration request from {}" . format ( red ( source . print_address_no_iid ( ) , False ) ) )
if 22 - 22: II111iiii / iII111i
continue
if 18 - 18: i11iIiiIii * ooOoO0o . I1IiiI + i1IIi + I11i
if 62 - 62: O0 % o0oOOo0O0Ooo + iIii1I11I1II1 + iIii1I11I1II1 * ooOoO0o
if 21 - 21: o0oOOo0O0Ooo % O0
if 81 - 81: i1IIi + i1IIi
if 3 - 3: I1Ii111 . I1ii11iIi11i * iII111i * i11iIiiIii * IiII
if 52 - 52: iIii1I11I1II1 % o0oOOo0O0Ooo % I1IiiI
oo0OOo = ooO00oO0O . registered_rlocs
ooO00oO0O . registered_rlocs = [ ]
if 14 - 14: OoO0O00
if 11 - 11: ooOoO0o * IiII * I1Ii111 * ooOoO0o
if 92 - 92: I1IiiI
if 94 - 94: OoOoOO00 % OoOoOO00 . i11iIiiIii
ii11iIi11 = packet
for o0000o0O0ooo in range ( IiII1iiI . rloc_count ) :
o00o = lisp_rloc_record ( )
packet = o00o . decode ( packet , None )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 43 - 43: OoOoOO00 / I1IiiI * OoO0O00 / Oo0Ooo
o00o . print_record ( " " )
if 59 - 59: I11i % i1IIi % Oo0Ooo % Oo0Ooo
if 91 - 91: I11i
if 98 - 98: I11i - II111iiii . IiII % Oo0Ooo
if 65 - 65: OoO0O00
if ( len ( iIoo . allowed_rlocs ) > 0 ) :
ooOOo0o = o00o . rloc . print_address ( )
if ( iIoo . allowed_rlocs . has_key ( ooOOo0o ) == False ) :
lprint ( ( " Reject registration, RLOC {} not " + "configured in allowed RLOC-set" ) . format ( red ( ooOOo0o , False ) ) )
if 65 - 65: oO0o
if 77 - 77: I11i * i1IIi - OOooOOo / OoOoOO00
ooO00oO0O . registered = False
packet = o00o . end_of_rlocs ( packet ,
IiII1iiI . rloc_count - o0000o0O0ooo - 1 )
break
if 50 - 50: O0 - oO0o . oO0o
if 98 - 98: IiII % Ii1I / Ii1I
if 10 - 10: Ii1I
if 69 - 69: I1Ii111 * OoooooooOO . o0oOOo0O0Ooo % I1IiiI
if 70 - 70: iII111i . i11iIiiIii * I1Ii111
if 54 - 54: o0oOOo0O0Ooo . i1IIi / iII111i
Oo0o0o0oo = lisp_rloc ( )
Oo0o0o0oo . store_rloc_from_record ( o00o , None , source )
if 21 - 21: O0 + ooOoO0o
if 53 - 53: Ii1I - II111iiii * iIii1I11I1II1
if 91 - 91: OoOoOO00 % iIii1I11I1II1
if 81 - 81: i11iIiiIii / OoOoOO00 + iIii1I11I1II1
if 65 - 65: o0oOOo0O0Ooo
if 73 - 73: I11i . I1ii11iIi11i - OoO0O00 + OoooooooOO
if ( source . is_exact_match ( Oo0o0o0oo . rloc ) ) :
Oo0o0o0oo . map_notify_requested = o0OOO . map_notify_requested
if 71 - 71: I1IiiI
if 27 - 27: OoO0O00 + i1IIi * OoooooooOO * iIii1I11I1II1 - Ii1I
if 85 - 85: OoO0O00 + II111iiii / OoO0O00 . II111iiii * OoOoOO00 * I1IiiI
if 19 - 19: iII111i / Ii1I + iIii1I11I1II1 * O0 - Oo0Ooo
if 47 - 47: iIii1I11I1II1 % I1ii11iIi11i
ooO00oO0O . registered_rlocs . append ( Oo0o0o0oo )
if 33 - 33: oO0o . oO0o / IiII + II111iiii
if 34 - 34: OoO0O00 . OoOoOO00 / i1IIi / OOooOOo
iIii111 = ( ooO00oO0O . do_rloc_sets_match ( oo0OOo ) == False )
if 37 - 37: Ii1I * o0oOOo0O0Ooo
if 39 - 39: OoooooooOO
if 37 - 37: OoO0O00 . iII111i
if 32 - 32: II111iiii
if 11 - 11: i11iIiiIii - OOooOOo . i1IIi + OOooOOo - O0
if 17 - 17: i1IIi % o0oOOo0O0Ooo % ooOoO0o / I11i
if ( o0OOO . map_register_refresh and iIii111 and
ooO00oO0O . registered ) :
lprint ( " Reject registration, refreshes cannot change RLOC-set" )
ooO00oO0O . registered_rlocs = oo0OOo
continue
if 68 - 68: OoOoOO00
if 14 - 14: iIii1I11I1II1 + oO0o / ooOoO0o
if 20 - 20: I1ii11iIi11i . II111iiii % I1Ii111 + I1Ii111 / OoooooooOO . Ii1I
if 98 - 98: OoooooooOO - i11iIiiIii - iII111i + Ii1I - I1IiiI
if 75 - 75: OOooOOo
if 25 - 25: iII111i / I1ii11iIi11i - ooOoO0o
if ( ooO00oO0O . registered == False ) :
ooO00oO0O . first_registered = lisp_get_timestamp ( )
lisp_registered_count += 1
if 53 - 53: IiII / OoooooooOO / ooOoO0o + Oo0Ooo - OOooOOo - iIii1I11I1II1
ooO00oO0O . last_registered = lisp_get_timestamp ( )
ooO00oO0O . registered = ( IiII1iiI . record_ttl != 0 )
ooO00oO0O . last_registerer = source
if 53 - 53: OOooOOo . I1IiiI . o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 40 - 40: OoooooooOO + iII111i % I1Ii111 . ooOoO0o
if 2 - 2: ooOoO0o
if 55 - 55: I11i + i1IIi * OoOoOO00 % Oo0Ooo * II111iiii . I1IiiI
ooO00oO0O . auth_sha1_or_sha2 = II1iI111i11
ooO00oO0O . proxy_reply_requested = o0OOO . proxy_reply_requested
ooO00oO0O . lisp_sec_present = o0OOO . lisp_sec_present
ooO00oO0O . map_notify_requested = o0OOO . map_notify_requested
ooO00oO0O . mobile_node_requested = o0OOO . mobile_node
ooO00oO0O . merge_register_requested = o0OOO . merge_register_requested
if 98 - 98: I1ii11iIi11i
ooO00oO0O . use_register_ttl_requested = o0OOO . use_ttl_for_timeout
if ( ooO00oO0O . use_register_ttl_requested ) :
ooO00oO0O . register_ttl = IiII1iiI . store_ttl ( )
else :
ooO00oO0O . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
if 57 - 57: OOooOOo * I11i . oO0o
ooO00oO0O . xtr_id_present = o0OOO . xtr_id_present
if ( ooO00oO0O . xtr_id_present ) :
ooO00oO0O . xtr_id = o0OOO . xtr_id
ooO00oO0O . site_id = o0OOO . site_id
if 17 - 17: iII111i - OOooOOo * I1IiiI + i1IIi % I1ii11iIi11i
if 71 - 71: Ii1I - o0oOOo0O0Ooo - oO0o
if 27 - 27: O0 - iIii1I11I1II1
if 78 - 78: Oo0Ooo / o0oOOo0O0Ooo
if 35 - 35: o0oOOo0O0Ooo . OoO0O00 / o0oOOo0O0Ooo / IiII - I1ii11iIi11i . Oo0Ooo
if ( o0OOO . merge_register_requested ) :
if ( OOooOo00Ooo . merge_in_site_eid ( ooO00oO0O ) ) :
OoooO0oOO . append ( [ IiII1iiI . eid , IiII1iiI . group ] )
if 97 - 97: i11iIiiIii + I1ii11iIi11i - I11i . oO0o
if ( o0OOO . map_notify_requested ) :
lisp_send_merged_map_notify ( lisp_sockets , OOooOo00Ooo , o0OOO ,
IiII1iiI )
if 76 - 76: IiII * II111iiii * I1ii11iIi11i + OoooooooOO - OoOoOO00 . Ii1I
if 51 - 51: II111iiii % I1Ii111 * O0 . ooOoO0o * OoOoOO00
if 17 - 17: I1IiiI % I11i
if ( iIii111 == False ) : continue
if ( len ( OoooO0oOO ) != 0 ) : continue
if 28 - 28: I1ii11iIi11i * OoooooooOO
oooOoooOO0Oo0 . append ( ooO00oO0O . print_eid_tuple ( ) )
if 19 - 19: Oo0Ooo - iII111i % OoOoOO00 * i11iIiiIii / oO0o . i11iIiiIii
if 46 - 46: I1ii11iIi11i
if 50 - 50: OOooOOo * OoO0O00 * OOooOOo % I1IiiI - I1Ii111 * Ii1I
if 88 - 88: OOooOOo . iII111i / I11i
if 1 - 1: iIii1I11I1II1 - Oo0Ooo % OoooooooOO
if 71 - 71: OOooOOo - Ii1I
if 68 - 68: ooOoO0o
IiII1iiI = IiII1iiI . encode ( )
IiII1iiI += ii11iIi11
IIIo000 = [ ooO00oO0O . print_eid_tuple ( ) ]
lprint ( " Changed RLOC-set, Map-Notifying old RLOC-set" )
if 35 - 35: IiII . iIii1I11I1II1 + Ii1I % O0
for Oo0o0o0oo in oo0OOo :
if ( Oo0o0o0oo . map_notify_requested == False ) : continue
if ( Oo0o0o0oo . rloc . is_exact_match ( source ) ) : continue
lisp_build_map_notify ( lisp_sockets , IiII1iiI , IIIo000 , 1 , Oo0o0o0oo . rloc ,
LISP_CTRL_PORT , o0OOO . nonce , o0OOO . key_id ,
o0OOO . alg_id , o0OOO . auth_len , iIoo , False )
if 94 - 94: OoOoOO00 + II111iiii . II111iiii + ooOoO0o + ooOoO0o
if 95 - 95: iIii1I11I1II1 / i11iIiiIii - IiII - OOooOOo
if 4 - 4: II111iiii + oO0o + o0oOOo0O0Ooo % IiII % iIii1I11I1II1
if 68 - 68: i11iIiiIii
if 79 - 79: OoOoOO00 * Ii1I / I1ii11iIi11i + OOooOOo
lisp_notify_subscribers ( lisp_sockets , IiII1iiI , ooO00oO0O . eid , iIoo )
if 19 - 19: I1IiiI + I11i + I1IiiI + OoO0O00
if 33 - 33: i11iIiiIii - Ii1I * II111iiii
if 97 - 97: OoO0O00 / o0oOOo0O0Ooo * iIii1I11I1II1
if 5 - 5: I1IiiI
if 27 - 27: i1IIi + oO0o / I1ii11iIi11i + oO0o
if ( len ( OoooO0oOO ) != 0 ) :
lisp_queue_multicast_map_notify ( lisp_sockets , OoooO0oOO )
if 98 - 98: II111iiii + iIii1I11I1II1
if 70 - 70: I11i / OoooooooOO / i11iIiiIii
if 61 - 61: O0 . Oo0Ooo . iIii1I11I1II1
if 54 - 54: OOooOOo * I1ii11iIi11i + OoooooooOO
if 58 - 58: i1IIi - OoooooooOO * OOooOOo . ooOoO0o + O0 + o0oOOo0O0Ooo
if 87 - 87: OOooOOo + I1Ii111 + O0 / oO0o / i11iIiiIii
if ( o0OOO . merge_register_requested ) : return
if 60 - 60: O0 . II111iiii
if 69 - 69: II111iiii / ooOoO0o - OoOoOO00 / OOooOOo
if 52 - 52: OoO0O00 % I11i + o0oOOo0O0Ooo % OoOoOO00
if 46 - 46: o0oOOo0O0Ooo % O0
if 30 - 30: oO0o
if ( o0OOO . map_notify_requested and iIoo != None ) :
lisp_build_map_notify ( lisp_sockets , I1IIiIiii , oooOoooOO0Oo0 ,
o0OOO . record_count , source , sport , o0OOO . nonce ,
o0OOO . key_id , o0OOO . alg_id , o0OOO . auth_len ,
iIoo , True )
if 64 - 64: O0
return
if 70 - 70: oO0o % I1IiiI . iIii1I11I1II1 - Oo0Ooo + OoOoOO00 % O0
if 91 - 91: I1Ii111 - oO0o * ooOoO0o - I1ii11iIi11i + IiII + O0
if 18 - 18: OoOoOO00 / IiII / o0oOOo0O0Ooo . OOooOOo
if 35 - 35: I11i . ooOoO0o % I11i / iII111i / O0 % I11i
if 29 - 29: I1Ii111 + Ii1I
if 100 - 100: Ii1I + I1Ii111 / iIii1I11I1II1 / i1IIi % OoOoOO00
if 6 - 6: oO0o + ooOoO0o
if 13 - 13: Oo0Ooo . IiII % iII111i + i1IIi / OOooOOo
if 1 - 1: I11i * i1IIi * Oo0Ooo % O0
if 41 - 41: OOooOOo % OoOoOO00
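#
# Editor's note: identifier names in this file are machine-obfuscated, and the
# "if N - N:" statements scattered throughout are dead-code filler (the
# condition is always false, so they never execute). The summary comments
# added before each routine below describe the behavior as read from the code.
#
# lisp_process_multicast_map_notify() decodes a Map-Notify and, for each
# (S,G) EID-record, finds or creates the map-cache entry, refreshes its TTL
# and mapping source, clears the RLOC-set when the record carries none, and
# otherwise installs the RLE RLOC from each RLOC-record, pushing each change
# to the IPC map-cache.
#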
def lisp_process_multicast_map_notify ( packet , source ) :
oO0o0ooo = lisp_map_notify ( "" )
packet = oO0o0ooo . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Notify packet" )
return
if 82 - 82: I11i . IiII
if 27 - 27: I1Ii111 % O0 * OoooooooOO . Oo0Ooo
oO0o0ooo . print_notify ( )
if ( oO0o0ooo . record_count == 0 ) : return
if 51 - 51: I11i
oOO000 = oO0o0ooo . eid_records
if 34 - 34: OoooooooOO . I1IiiI . Oo0Ooo % iII111i
for II11iIII1i1I in range ( oO0o0ooo . record_count ) :
IiII1iiI = lisp_eid_record ( )
oOO000 = IiII1iiI . decode ( oOO000 )
  # check the per-record decode result (the outer packet was already validated)
  if ( oOO000 == None ) : return
IiII1iiI . print_record ( " " , False )
if 24 - 24: ooOoO0o * oO0o * Oo0Ooo . oO0o - OoOoOO00
if 85 - 85: II111iiii
if 51 - 51: Oo0Ooo
if 57 - 57: i1IIi * ooOoO0o + o0oOOo0O0Ooo + O0 - I1ii11iIi11i % IiII
ooooOoo000O = lisp_map_cache_lookup ( IiII1iiI . eid , IiII1iiI . group )
if ( ooooOoo000O == None ) :
ooooOoo000O = lisp_mapping ( IiII1iiI . eid , IiII1iiI . group , [ ] )
ooooOoo000O . add_cache ( )
if 62 - 62: Ii1I / i11iIiiIii - I11i * ooOoO0o + iII111i
if 85 - 85: oO0o . iIii1I11I1II1 % i11iIiiIii - i11iIiiIii % IiII / Oo0Ooo
ooooOoo000O . mapping_source = None if source == "lisp-etr" else source
ooooOoo000O . map_cache_ttl = IiII1iiI . store_ttl ( )
if 11 - 11: OoO0O00 . I1IiiI * I1ii11iIi11i / ooOoO0o - i11iIiiIii
if 40 - 40: I1ii11iIi11i + I11i * OoooooooOO % OoooooooOO
if 19 - 19: Oo0Ooo . OOooOOo
if 58 - 58: IiII % iII111i + i1IIi % I1IiiI % OOooOOo . iII111i
if 85 - 85: i11iIiiIii . o0oOOo0O0Ooo * iII111i . I1ii11iIi11i / I1Ii111 % Ii1I
if ( len ( ooooOoo000O . rloc_set ) != 0 and IiII1iiI . rloc_count == 0 ) :
ooooOoo000O . rloc_set = [ ]
ooooOoo000O . build_best_rloc_set ( )
lisp_write_ipc_map_cache ( True , ooooOoo000O )
lprint ( "Update {} map-cache entry with no RLOC-set" . format ( green ( ooooOoo000O . print_eid_tuple ( ) , False ) ) )
if 27 - 27: II111iiii . iIii1I11I1II1 / I1ii11iIi11i / i1IIi / iIii1I11I1II1
continue
if 70 - 70: i11iIiiIii . OoO0O00 / OoooooooOO * OoooooooOO - OOooOOo
if 34 - 34: I1ii11iIi11i * i1IIi % OoooooooOO / I1IiiI
III11i1 = ooooOoo000O . rtrs_in_rloc_set ( )
if 80 - 80: o0oOOo0O0Ooo * ooOoO0o
if 87 - 87: I1Ii111 + O0 / I1ii11iIi11i / OoOoOO00 . Oo0Ooo - IiII
if 24 - 24: OoOoOO00
if 19 - 19: ooOoO0o
if 43 - 43: O0 . I1Ii111 % OoooooooOO / I1IiiI . o0oOOo0O0Ooo - OoOoOO00
for o0000o0O0ooo in range ( IiII1iiI . rloc_count ) :
o00o = lisp_rloc_record ( )
oOO000 = o00o . decode ( oOO000 , None )
o00o . print_record ( " " )
if ( IiII1iiI . group . is_null ( ) ) : continue
if ( o00o . rle == None ) : continue
if 46 - 46: I11i - OoooooooOO % o0oOOo0O0Ooo
if 7 - 7: OoooooooOO - I1Ii111 * IiII
if 20 - 20: o0oOOo0O0Ooo . OoooooooOO * I1IiiI . Oo0Ooo * OoOoOO00
if 3 - 3: I1Ii111 % i11iIiiIii % O0 % II111iiii
if 8 - 8: OoooooooOO * ooOoO0o
iiIIi11 = ooooOoo000O . rloc_set [ 0 ] . stats if len ( ooooOoo000O . rloc_set ) != 0 else None
if 70 - 70: I1Ii111 / oO0o % OoooooooOO
if 65 - 65: I1Ii111 . I1ii11iIi11i * iII111i
if 89 - 89: o0oOOo0O0Ooo / I1Ii111 - oO0o + iII111i % I1IiiI - Ii1I
if 58 - 58: OoOoOO00 + O0 - OoooooooOO % OoOoOO00 % i1IIi
Oo0o0o0oo = lisp_rloc ( )
Oo0o0o0oo . store_rloc_from_record ( o00o , None , ooooOoo000O . mapping_source )
if ( iiIIi11 != None ) : Oo0o0o0oo . stats = copy . deepcopy ( iiIIi11 )
if 75 - 75: OoOoOO00 . IiII - OoO0O00 . o0oOOo0O0Ooo % II111iiii
if ( III11i1 and Oo0o0o0oo . is_rtr ( ) == False ) : continue
if 69 - 69: Ii1I % OoooooooOO
ooooOoo000O . rloc_set = [ Oo0o0o0oo ]
ooooOoo000O . build_best_rloc_set ( )
lisp_write_ipc_map_cache ( True , ooooOoo000O )
if 62 - 62: Oo0Ooo / oO0o
lprint ( "Update {} map-cache entry with RLE {}" . format ( green ( ooooOoo000O . print_eid_tuple ( ) , False ) , Oo0o0o0oo . rle . print_rle ( False ) ) )
if 87 - 87: oO0o
if 39 - 39: iII111i
if 46 - 46: i11iIiiIii * iII111i / Oo0Ooo % OOooOOo % oO0o / Ii1I
return
if 75 - 75: Ii1I
if 37 - 37: I1IiiI / OoO0O00 . OoO0O00 + i11iIiiIii - oO0o
if 57 - 57: I1IiiI . OoO0O00
if 49 - 49: II111iiii + iII111i
if 85 - 85: I11i / i11iIiiIii
if 33 - 33: iIii1I11I1II1 % O0 + II111iiii * OOooOOo . Ii1I * iII111i
if 48 - 48: I11i * iIii1I11I1II1 / oO0o
if 34 - 34: i1IIi + oO0o * Oo0Ooo * I1Ii111 % OoooooooOO % ooOoO0o
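#
# lisp_process_map_notify() handles a Map-Notify received from a Map-Server:
# it authenticates the message against the matching lisp_map_servers_list
# entry when authentication data is present, forwards (S,G) EID-records to
# the lisp-itr process over IPC, and answers with a Map-Notify-Ack.
#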
def lisp_process_map_notify ( lisp_sockets , orig_packet , source ) :
oO0o0ooo = lisp_map_notify ( "" )
oOo = oO0o0ooo . decode ( orig_packet )
if ( oOo == None ) :
lprint ( "Could not decode Map-Notify packet" )
return
if 17 - 17: I1ii11iIi11i + o0oOOo0O0Ooo / OoO0O00 . Oo0Ooo - o0oOOo0O0Ooo / oO0o
if 87 - 87: ooOoO0o
oO0o0ooo . print_notify ( )
if 74 - 74: i11iIiiIii . i11iIiiIii . iIii1I11I1II1
if 100 - 100: i11iIiiIii - oO0o + iIii1I11I1II1 * OoOoOO00 % OOooOOo % i11iIiiIii
if 26 - 26: O0
if 97 - 97: OOooOOo + I11i % I1Ii111 % i11iIiiIii / I1ii11iIi11i
if 21 - 21: O0 + iIii1I11I1II1 / i11iIiiIii . OOooOOo * i1IIi
o00oOOO = source . print_address ( )
if ( oO0o0ooo . alg_id != 0 or oO0o0ooo . auth_len != 0 ) :
ooooOOoO = None
for Iiii11 in lisp_map_servers_list :
if ( Iiii11 . find ( o00oOOO ) == - 1 ) : continue
ooooOOoO = lisp_map_servers_list [ Iiii11 ]
if 3 - 3: i1IIi % o0oOOo0O0Ooo + OoOoOO00
if ( ooooOOoO == None ) :
lprint ( ( " Could not find Map-Server {} to authenticate " + "Map-Notify" ) . format ( o00oOOO ) )
if 32 - 32: OoO0O00 . Oo0Ooo * iIii1I11I1II1
return
if 12 - 12: O0 + I1ii11iIi11i + I11i . I1Ii111
if 48 - 48: Ii1I . iIii1I11I1II1 - iIii1I11I1II1 * I11i . OoooooooOO
ooooOOoO . map_notifies_received += 1
if 73 - 73: Ii1I / II111iiii - iIii1I11I1II1 . ooOoO0o * II111iiii . OOooOOo
oO0O = lisp_verify_auth ( oOo , oO0o0ooo . alg_id ,
oO0o0ooo . auth_data , ooooOOoO . password )
if 50 - 50: iIii1I11I1II1 + OoOoOO00 % O0 + OoO0O00 . i11iIiiIii / oO0o
lprint ( " Authentication {} for Map-Notify" . format ( "succeeded" if oO0O else "failed" ) )
if 31 - 31: I1IiiI % o0oOOo0O0Ooo . i11iIiiIii % OOooOOo - iIii1I11I1II1
if ( oO0O == False ) : return
else :
ooooOOoO = lisp_ms ( o00oOOO , None , "" , 0 , "" , False , False , False , False , 0 , 0 , 0 ,
None )
if 77 - 77: i11iIiiIii / OOooOOo
if 93 - 93: I1ii11iIi11i - iII111i % O0 - Ii1I
if 84 - 84: I1ii11iIi11i . iIii1I11I1II1 % IiII * I11i + ooOoO0o
if 59 - 59: oO0o * OoO0O00 - I11i * I1IiiI
if 60 - 60: iII111i - OoooooooOO / iII111i % OoO0O00 . OoOoOO00 - o0oOOo0O0Ooo
if 71 - 71: iII111i * o0oOOo0O0Ooo * i11iIiiIii * O0
oOO000 = oO0o0ooo . eid_records
if ( oO0o0ooo . record_count == 0 ) :
lisp_send_map_notify_ack ( lisp_sockets , oOO000 , oO0o0ooo , ooooOOoO )
return
if 77 - 77: OOooOOo % iII111i + I11i / OoOoOO00
if 50 - 50: OoOoOO00 - i11iIiiIii - OOooOOo . iIii1I11I1II1
if 97 - 97: oO0o % OOooOOo . OoooooooOO * Ii1I
if 100 - 100: I1ii11iIi11i / Ii1I % Oo0Ooo
if 83 - 83: O0 . I1Ii111 % I1ii11iIi11i
if 97 - 97: Oo0Ooo % OoO0O00 * I1ii11iIi11i * ooOoO0o * OoO0O00
if 12 - 12: ooOoO0o
if 56 - 56: i1IIi
IiII1iiI = lisp_eid_record ( )
oOo = IiII1iiI . decode ( oOO000 )
if ( oOo == None ) : return
if 3 - 3: OOooOOo - Oo0Ooo * Ii1I + i11iIiiIii
IiII1iiI . print_record ( " " , False )
if 53 - 53: i1IIi % I1ii11iIi11i
for o0000o0O0ooo in range ( IiII1iiI . rloc_count ) :
o00o = lisp_rloc_record ( )
oOo = o00o . decode ( oOo , None )
if ( oOo == None ) :
lprint ( " Could not decode RLOC-record in Map-Notify packet" )
return
if 65 - 65: I11i + OoOoOO00 - i11iIiiIii
o00o . print_record ( " " )
if 72 - 72: i11iIiiIii - iII111i . i11iIiiIii
if 61 - 61: oO0o . i11iIiiIii / Ii1I % iII111i
if 36 - 36: OoO0O00 + Ii1I / I11i - iII111i % OoO0O00 / Oo0Ooo
if 38 - 38: Ii1I - ooOoO0o - O0 + oO0o . iIii1I11I1II1
if 90 - 90: i1IIi * OoOoOO00
if ( IiII1iiI . group . is_null ( ) == False ) :
if 27 - 27: iIii1I11I1II1
if 95 - 95: iII111i / ooOoO0o % Ii1I
if 44 - 44: OOooOOo . OOooOOo
if 5 - 5: oO0o + OoooooooOO
if 88 - 88: oO0o + OOooOOo
lprint ( "Send {} Map-Notify IPC message to ITR process" . format ( green ( IiII1iiI . print_eid_tuple ( ) , False ) ) )
if 14 - 14: I11i / i1IIi
if 56 - 56: OoooooooOO
oOooOOoo = lisp_control_packet_ipc ( orig_packet , o00oOOO , "lisp-itr" , 0 )
lisp_ipc ( oOooOOoo , lisp_sockets [ 2 ] , "lisp-core-pkt" )
if 59 - 59: I1ii11iIi11i + OoO0O00
if 37 - 37: IiII * I1IiiI % O0
if 32 - 32: ooOoO0o % II111iiii
if 60 - 60: i11iIiiIii
if 11 - 11: o0oOOo0O0Ooo
lisp_send_map_notify_ack ( lisp_sockets , oOO000 , oO0o0ooo , ooooOOoO )
return
if 77 - 77: o0oOOo0O0Ooo / iIii1I11I1II1 * iIii1I11I1II1 / o0oOOo0O0Ooo * iII111i
if 26 - 26: Ii1I
if 1 - 1: OoOoOO00 . o0oOOo0O0Ooo + Oo0Ooo % Oo0Ooo * I1ii11iIi11i
if 50 - 50: IiII / i1IIi . I1ii11iIi11i
if 75 - 75: I11i * oO0o + OoooooooOO . iII111i + OoO0O00
if 44 - 44: II111iiii
if 65 - 65: I11i . iII111i . I1IiiI - Oo0Ooo % iIii1I11I1II1 / O0
if 54 - 54: iII111i - I1Ii111
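#
# lisp_process_map_notify_ack() authenticates a Map-Notify-Ack with the
# registering site's auth-key (selected by key-id), then cancels the
# retransmit timer and removes the pending Map-Notify from the retransmit
# queue keyed by nonce.
#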
def lisp_process_map_notify_ack ( packet , source ) :
oO0o0ooo = lisp_map_notify ( "" )
packet = oO0o0ooo . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Notify-Ack packet" )
return
if 88 - 88: iII111i * OoO0O00 % OoooooooOO / oO0o
if 7 - 7: i1IIi
oO0o0ooo . print_notify ( )
if 30 - 30: oO0o . i1IIi / I11i
if 23 - 23: i1IIi + oO0o % iII111i - OoO0O00 - i1IIi
if 74 - 74: Ii1I + I11i . OoooooooOO - I1ii11iIi11i
if 2 - 2: oO0o - o0oOOo0O0Ooo
if 80 - 80: i1IIi
if ( oO0o0ooo . record_count < 1 ) :
lprint ( "No EID-prefix found, cannot authenticate Map-Notify-Ack" )
return
if 40 - 40: O0 . ooOoO0o * iII111i . I11i + I1Ii111 % OoO0O00
if 9 - 9: IiII * oO0o - o0oOOo0O0Ooo
IiII1iiI = lisp_eid_record ( )
if 17 - 17: iII111i % Oo0Ooo
if ( IiII1iiI . decode ( oO0o0ooo . eid_records ) == None ) :
lprint ( "Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack" )
return
if 14 - 14: I1IiiI - I1Ii111 % I1IiiI - II111iiii
IiII1iiI . print_record ( " " , False )
if 34 - 34: I1ii11iIi11i * IiII / II111iiii / ooOoO0o * oO0o
oO00oo000O = IiII1iiI . print_eid_tuple ( )
if 3 - 3: II111iiii
if 61 - 61: oO0o . I1IiiI + i1IIi
if 69 - 69: O0 / i1IIi - OoOoOO00 + ooOoO0o - oO0o
if 80 - 80: o0oOOo0O0Ooo % O0 * I11i . i1IIi - ooOoO0o
if ( oO0o0ooo . alg_id != LISP_NONE_ALG_ID and oO0o0ooo . auth_len != 0 ) :
ooO00oO0O = lisp_sites_by_eid . lookup_cache ( IiII1iiI . eid , True )
if ( ooO00oO0O == None ) :
iIo0OO0O000 = bold ( "Site not found" , False )
lprint ( ( "{} for EID {}, cannot authenticate Map-Notify-Ack" ) . format ( iIo0OO0O000 , green ( oO00oo000O , False ) ) )
if 93 - 93: OoooooooOO / o0oOOo0O0Ooo
return
if 61 - 61: II111iiii / i1IIi . I1ii11iIi11i % iIii1I11I1II1
iIoo = ooO00oO0O . site
if 66 - 66: iIii1I11I1II1 % OoOoOO00 + i1IIi * i11iIiiIii * OoooooooOO
if 36 - 36: iII111i - OoO0O00 + I1IiiI + Ii1I . OoooooooOO
if 75 - 75: oO0o * Oo0Ooo * O0
if 22 - 22: ooOoO0o / OoooooooOO . II111iiii / Ii1I * OoO0O00 . i1IIi
iIoo . map_notify_acks_received += 1
if 62 - 62: oO0o % Ii1I - Ii1I
OoooOOo0oOO = oO0o0ooo . key_id
if ( iIoo . auth_key . has_key ( OoooOOo0oOO ) == False ) : OoooOOo0oOO = 0
O0Ooo0 = iIoo . auth_key [ OoooOOo0oOO ]
if 16 - 16: OoO0O00 - O0 - OOooOOo - I11i % OoOoOO00
oO0O = lisp_verify_auth ( packet , oO0o0ooo . alg_id ,
oO0o0ooo . auth_data , O0Ooo0 )
if 7 - 7: I1Ii111 / OoOoOO00 . II111iiii
OoooOOo0oOO = "key-id {}" . format ( OoooOOo0oOO ) if OoooOOo0oOO == oO0o0ooo . key_id else "bad key-id {}" . format ( oO0o0ooo . key_id )
if 9 - 9: I11i . I11i . OoooooooOO
if 42 - 42: iII111i / oO0o / iII111i * OoO0O00
lprint ( " Authentication {} for Map-Notify-Ack, {}" . format ( "succeeded" if oO0O else "failed" , OoooOOo0oOO ) )
if 25 - 25: OoOoOO00 - II111iiii + II111iiii . Ii1I * II111iiii
if ( oO0O == False ) : return
if 12 - 12: IiII / Ii1I
if 54 - 54: Oo0Ooo + Ii1I % OoooooooOO * OOooOOo / OoOoOO00
if 39 - 39: I1IiiI % i11iIiiIii % Ii1I
if 59 - 59: ooOoO0o % OoO0O00 / I1IiiI - II111iiii + OoooooooOO * i11iIiiIii
if 58 - 58: IiII / Oo0Ooo + o0oOOo0O0Ooo
if ( oO0o0ooo . retransmit_timer ) : oO0o0ooo . retransmit_timer . cancel ( )
if 71 - 71: Ii1I - IiII
Ii11 = source . print_address ( )
Iiii11 = oO0o0ooo . nonce_key
if 2 - 2: OoOoOO00 % IiII % OoO0O00 . i1IIi / I1Ii111 - iIii1I11I1II1
if ( lisp_map_notify_queue . has_key ( Iiii11 ) ) :
oO0o0ooo = lisp_map_notify_queue . pop ( Iiii11 )
if ( oO0o0ooo . retransmit_timer ) : oO0o0ooo . retransmit_timer . cancel ( )
lprint ( "Dequeue Map-Notify from retransmit queue, key is: {}" . format ( Iiii11 ) )
if 88 - 88: Oo0Ooo * i1IIi % OOooOOo
else :
lprint ( "Map-Notify with nonce 0x{} queue entry not found for {}" . format ( oO0o0ooo . nonce_key , red ( Ii11 , False ) ) )
if 65 - 65: iII111i . oO0o
if 67 - 67: I1IiiI / iII111i / O0 % ooOoO0o - IiII / Ii1I
return
if 31 - 31: I11i - oO0o * ooOoO0o
if 64 - 64: I11i
if 41 - 41: I1Ii111 * OoooooooOO / OoOoOO00 + OoO0O00 . OoOoOO00 + I1Ii111
if 9 - 9: IiII . I11i . I1Ii111 / i1IIi * OoOoOO00 - O0
if 3 - 3: O0 / iIii1I11I1II1 % IiII + I11i
if 43 - 43: Oo0Ooo % I11i
if 53 - 53: OoOoOO00 % OoooooooOO * o0oOOo0O0Ooo % OoooooooOO
if 47 - 47: iIii1I11I1II1 - OOooOOo + I1ii11iIi11i * ooOoO0o + Oo0Ooo + OoO0O00
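#
# lisp_map_referral_loop() returns True when a node/MS referral for (eid,
# group) is not more specific than the referral prefix cached from the
# previous iteration, i.e. the DDT referral walk appears to be looping.
#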
def lisp_map_referral_loop ( mr , eid , group , action , s ) :
if ( action not in ( LISP_DDT_ACTION_NODE_REFERRAL ,
LISP_DDT_ACTION_MS_REFERRAL ) ) : return ( False )
if 64 - 64: OoOoOO00 - OoOoOO00 . OoooooooOO + ooOoO0o
if ( mr . last_cached_prefix [ 0 ] == None ) : return ( False )
if 100 - 100: ooOoO0o . OoooooooOO % i1IIi % OoO0O00
if 26 - 26: OoOoOO00 * IiII
if 76 - 76: I1IiiI + IiII * I1ii11iIi11i * I1IiiI % Ii1I + ooOoO0o
if 46 - 46: OoOoOO00
oooOOOo0 = False
if ( group . is_null ( ) == False ) :
oooOOOo0 = mr . last_cached_prefix [ 1 ] . is_more_specific ( group )
if 66 - 66: iII111i - O0 . I1Ii111 * i1IIi / OoO0O00 / II111iiii
if ( oooOOOo0 == False ) :
oooOOOo0 = mr . last_cached_prefix [ 0 ] . is_more_specific ( eid )
if 35 - 35: ooOoO0o * OOooOOo / I11i % I11i / OoooooooOO . I1Ii111
if 70 - 70: I1ii11iIi11i % I1ii11iIi11i / oO0o
if ( oooOOOo0 ) :
I11Ii11ii = lisp_print_eid_tuple ( eid , group )
OOo0OOO0Ooo = lisp_print_eid_tuple ( mr . last_cached_prefix [ 0 ] ,
mr . last_cached_prefix [ 1 ] )
if 90 - 90: I1IiiI / I1Ii111 + Oo0Ooo / o0oOOo0O0Ooo + OOooOOo
lprint ( ( "Map-Referral prefix {} from {} is not more-specific " + "than cached prefix {}" ) . format ( green ( I11Ii11ii , False ) , s ,
OOo0OOO0Ooo ) )
if 41 - 41: Ii1I % Ii1I * oO0o - I11i + iIii1I11I1II1 . ooOoO0o
return ( oooOOOo0 )
if 30 - 30: Ii1I * iII111i . II111iiii / i1IIi
if 77 - 77: oO0o . IiII + I1ii11iIi11i . i1IIi
if 49 - 49: I1Ii111 . OoooooooOO / o0oOOo0O0Ooo - iII111i - iII111i - i11iIiiIii
if 37 - 37: OOooOOo
if 79 - 79: I1Ii111 - OoO0O00 + ooOoO0o + oO0o . i11iIiiIii + i1IIi
if 32 - 32: IiII . ooOoO0o / OoO0O00 / iII111i . iIii1I11I1II1 % IiII
if 28 - 28: I1Ii111 + OoooooooOO + IiII . ooOoO0o . I1IiiI / oO0o
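#
# lisp_process_map_referral() decodes a Map-Referral, matches each EID-record
# to a queued DDT Map-Request by nonce, updates the referral-cache entry
# (referral-set membership, up/down status, TTL), and then acts on the
# referral action: delegation-hole and not-authoritative answers produce
# negative Map-Replies, ms-not-registered retries other referral nodes,
# node/MS referrals re-send the DDT Map-Request, and ms-ack dequeues it.
#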
def lisp_process_map_referral ( lisp_sockets , packet , source ) :
if 66 - 66: Ii1I - I11i + Oo0Ooo . ooOoO0o
OOOoo = lisp_map_referral ( )
packet = OOOoo . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Referral packet" )
return
if 89 - 89: IiII . II111iiii / OoO0O00 + I1ii11iIi11i * i11iIiiIii
OOOoo . print_map_referral ( )
if 85 - 85: o0oOOo0O0Ooo - Oo0Ooo / I1Ii111
o00oOOO = source . print_address ( )
i11III1I = OOOoo . nonce
if 100 - 100: OoO0O00 * iIii1I11I1II1 - IiII . i1IIi % i11iIiiIii % Oo0Ooo
if 22 - 22: ooOoO0o - OOooOOo
if 90 - 90: i11iIiiIii . i11iIiiIii - iIii1I11I1II1
if 20 - 20: ooOoO0o - i11iIiiIii
for II11iIII1i1I in range ( OOOoo . record_count ) :
IiII1iiI = lisp_eid_record ( )
packet = IiII1iiI . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode EID-record in Map-Referral packet" )
return
if 23 - 23: OoO0O00 + I1IiiI / I1ii11iIi11i * I1ii11iIi11i % ooOoO0o
IiII1iiI . print_record ( " " , True )
if 83 - 83: I1IiiI * i11iIiiIii - I1ii11iIi11i + I11i
if 33 - 33: OoO0O00 . OoooooooOO % iII111i / oO0o * Ii1I + ooOoO0o
if 29 - 29: oO0o
if 21 - 21: i11iIiiIii . o0oOOo0O0Ooo
Iiii11 = str ( i11III1I )
if ( Iiii11 not in lisp_ddt_map_requestQ ) :
lprint ( ( "Map-Referral nonce 0x{} from {} not found in " + "Map-Request queue, EID-record ignored" ) . format ( lisp_hex_string ( i11III1I ) , o00oOOO ) )
if 78 - 78: Oo0Ooo
if 77 - 77: oO0o % Oo0Ooo % O0
continue
if 51 - 51: IiII % IiII + OOooOOo . II111iiii / I1ii11iIi11i
Ii1IIi1III1i = lisp_ddt_map_requestQ [ Iiii11 ]
if ( Ii1IIi1III1i == None ) :
lprint ( ( "No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored" ) . format ( lisp_hex_string ( i11III1I ) , o00oOOO ) )
if 4 - 4: o0oOOo0O0Ooo % I1IiiI * o0oOOo0O0Ooo * OoOoOO00 - Ii1I
continue
if 61 - 61: OoooooooOO - OoOoOO00 . O0 / ooOoO0o . Ii1I
if 41 - 41: Oo0Ooo / OoOoOO00 % I1Ii111 - O0
if 19 - 19: I1IiiI % I1Ii111 - O0 . iIii1I11I1II1 . I11i % O0
if 88 - 88: ooOoO0o
if 52 - 52: iIii1I11I1II1 % ooOoO0o * iIii1I11I1II1
if 20 - 20: i11iIiiIii * I11i
if ( lisp_map_referral_loop ( Ii1IIi1III1i , IiII1iiI . eid , IiII1iiI . group ,
IiII1iiI . action , o00oOOO ) ) :
Ii1IIi1III1i . dequeue_map_request ( )
continue
if 29 - 29: IiII / OOooOOo
if 39 - 39: O0 + II111iiii
Ii1IIi1III1i . last_cached_prefix [ 0 ] = IiII1iiI . eid
Ii1IIi1III1i . last_cached_prefix [ 1 ] = IiII1iiI . group
if 94 - 94: OOooOOo % I1ii11iIi11i % O0 + iII111i
if 62 - 62: iIii1I11I1II1 . OoOoOO00 / iIii1I11I1II1 + IiII
if 31 - 31: Ii1I . OoO0O00 . Ii1I + OoO0O00 * iIii1I11I1II1 . iII111i
if 42 - 42: O0 / oO0o % O0 . i1IIi % OOooOOo
IiI1iiIi1I1i = False
I1IIiII1 = lisp_referral_cache_lookup ( IiII1iiI . eid , IiII1iiI . group ,
True )
if ( I1IIiII1 == None ) :
IiI1iiIi1I1i = True
I1IIiII1 = lisp_referral ( )
I1IIiII1 . eid = IiII1iiI . eid
I1IIiII1 . group = IiII1iiI . group
if ( IiII1iiI . ddt_incomplete == False ) : I1IIiII1 . add_cache ( )
elif ( I1IIiII1 . referral_source . not_set ( ) ) :
lprint ( "Do not replace static referral entry {}" . format ( green ( I1IIiII1 . print_eid_tuple ( ) , False ) ) )
if 13 - 13: I1IiiI % ooOoO0o + OOooOOo
Ii1IIi1III1i . dequeue_map_request ( )
continue
if 91 - 91: oO0o - ooOoO0o
if 20 - 20: i1IIi . IiII / o0oOOo0O0Ooo / I11i
O0oo0oo0 = IiII1iiI . action
I1IIiII1 . referral_source = source
I1IIiII1 . referral_type = O0oo0oo0
Ii1 = IiII1iiI . store_ttl ( )
I1IIiII1 . referral_ttl = Ii1
I1IIiII1 . expires = lisp_set_timestamp ( Ii1 )
if 27 - 27: ooOoO0o . ooOoO0o - Ii1I % i11iIiiIii
if 74 - 74: I1Ii111 - II111iiii % o0oOOo0O0Ooo
if 7 - 7: I1IiiI + OoooooooOO + o0oOOo0O0Ooo . OoooooooOO
if 29 - 29: iII111i * O0 + I1IiiI * IiII + iII111i - IiII
iI11iIii = I1IIiII1 . is_referral_negative ( )
if ( I1IIiII1 . referral_set . has_key ( o00oOOO ) ) :
IiOO00O00 = I1IIiII1 . referral_set [ o00oOOO ]
if 14 - 14: I1IiiI . o0oOOo0O0Ooo / I1Ii111
if ( IiOO00O00 . updown == False and iI11iIii == False ) :
IiOO00O00 . updown = True
lprint ( "Change up/down status for referral-node {} to up" . format ( o00oOOO ) )
if 67 - 67: OoooooooOO . oO0o * OoOoOO00 - OoooooooOO
elif ( IiOO00O00 . updown == True and iI11iIii == True ) :
IiOO00O00 . updown = False
lprint ( ( "Change up/down status for referral-node {} " + "to down, received negative referral" ) . format ( o00oOOO ) )
if 32 - 32: oO0o
if 72 - 72: I1IiiI
if 34 - 34: ooOoO0o % II111iiii / ooOoO0o
if 87 - 87: Oo0Ooo
if 7 - 7: iIii1I11I1II1
if 85 - 85: iIii1I11I1II1 . O0
if 43 - 43: II111iiii / OoOoOO00 + OOooOOo % Oo0Ooo * OOooOOo
if 62 - 62: ooOoO0o * OOooOOo . I11i + Oo0Ooo - I1Ii111
I11I1I1iiiIIi = { }
for Iiii11 in I1IIiII1 . referral_set : I11I1I1iiiIIi [ Iiii11 ] = None
if 63 - 63: I11i % I1ii11iIi11i / o0oOOo0O0Ooo
if 95 - 95: oO0o * I1IiiI / OOooOOo
if 79 - 79: O0 . iII111i . iII111i % ooOoO0o
if 74 - 74: ooOoO0o
for II11iIII1i1I in range ( IiII1iiI . rloc_count ) :
o00o = lisp_rloc_record ( )
packet = o00o . decode ( packet , None )
if ( packet == None ) :
lprint ( "Could not decode RLOC-record in Map-Referral packet" )
return
if 37 - 37: oO0o / i1IIi * iII111i - i1IIi
o00o . print_record ( " " )
if 12 - 12: OoO0O00 * IiII + OoOoOO00 * I1Ii111 % OoOoOO00 + OoOoOO00
if 12 - 12: I1ii11iIi11i % Ii1I * OoOoOO00 . iIii1I11I1II1 * I1Ii111 - OoOoOO00
if 33 - 33: OoO0O00 * I1IiiI / i1IIi
if 88 - 88: Ii1I / ooOoO0o - I11i % OoO0O00 * iII111i
ooOOo0o = o00o . rloc . print_address ( )
if ( I1IIiII1 . referral_set . has_key ( ooOOo0o ) == False ) :
IiOO00O00 = lisp_referral_node ( )
IiOO00O00 . referral_address . copy_address ( o00o . rloc )
I1IIiII1 . referral_set [ ooOOo0o ] = IiOO00O00
if ( o00oOOO == ooOOo0o and iI11iIii ) : IiOO00O00 . updown = False
else :
IiOO00O00 = I1IIiII1 . referral_set [ ooOOo0o ]
if ( I11I1I1iiiIIi . has_key ( ooOOo0o ) ) : I11I1I1iiiIIi . pop ( ooOOo0o )
if 47 - 47: i11iIiiIii + Oo0Ooo % oO0o % O0
IiOO00O00 . priority = o00o . priority
IiOO00O00 . weight = o00o . weight
if 98 - 98: oO0o - O0 / iII111i % oO0o % I1IiiI / i1IIi
if 61 - 61: ooOoO0o + II111iiii
if 54 - 54: OoOoOO00 * o0oOOo0O0Ooo . OoO0O00
if 53 - 53: oO0o % OoO0O00 / OoO0O00 / I11i * Oo0Ooo
if 13 - 13: i1IIi % iIii1I11I1II1 - iII111i - I1IiiI - IiII + iIii1I11I1II1
for Iiii11 in I11I1I1iiiIIi : I1IIiII1 . referral_set . pop ( Iiii11 )
if 22 - 22: IiII - OOooOOo + I1ii11iIi11i
oO00oo000O = I1IIiII1 . print_eid_tuple ( )
if 64 - 64: OoOoOO00
if ( IiI1iiIi1I1i ) :
if ( IiII1iiI . ddt_incomplete ) :
lprint ( "Suppress add {} to referral-cache" . format ( green ( oO00oo000O , False ) ) )
if 79 - 79: IiII
else :
lprint ( "Add {}, referral-count {} to referral-cache" . format ( green ( oO00oo000O , False ) , IiII1iiI . rloc_count ) )
if 65 - 65: Oo0Ooo - i11iIiiIii * OoOoOO00 . I1Ii111 . iIii1I11I1II1
if 48 - 48: iIii1I11I1II1 - oO0o / OoO0O00 + O0 . Ii1I + I1Ii111
else :
lprint ( "Replace {}, referral-count: {} in referral-cache" . format ( green ( oO00oo000O , False ) , IiII1iiI . rloc_count ) )
if 17 - 17: OoOoOO00 . Oo0Ooo - I1Ii111 / I1Ii111 + I11i % i1IIi
if 31 - 31: OoooooooOO . O0 / OoO0O00 . I1Ii111
if 41 - 41: OoooooooOO + iII111i . OOooOOo
if 73 - 73: oO0o + i1IIi + i11iIiiIii / I1ii11iIi11i
if 100 - 100: I1IiiI % ooOoO0o % OoooooooOO / i11iIiiIii + i11iIiiIii % IiII
if 39 - 39: Ii1I % o0oOOo0O0Ooo + OOooOOo / iIii1I11I1II1
if ( O0oo0oo0 == LISP_DDT_ACTION_DELEGATION_HOLE ) :
lisp_send_negative_map_reply ( Ii1IIi1III1i . lisp_sockets , I1IIiII1 . eid ,
I1IIiII1 . group , Ii1IIi1III1i . nonce , Ii1IIi1III1i . itr , Ii1IIi1III1i . sport , 15 , None , False )
Ii1IIi1III1i . dequeue_map_request ( )
if 40 - 40: iIii1I11I1II1 / iII111i % OOooOOo % i11iIiiIii
if 57 - 57: II111iiii % OoO0O00 * i1IIi
if ( O0oo0oo0 == LISP_DDT_ACTION_NOT_AUTH ) :
if ( Ii1IIi1III1i . tried_root ) :
lisp_send_negative_map_reply ( Ii1IIi1III1i . lisp_sockets , I1IIiII1 . eid ,
I1IIiII1 . group , Ii1IIi1III1i . nonce , Ii1IIi1III1i . itr , Ii1IIi1III1i . sport , 0 , None , False )
Ii1IIi1III1i . dequeue_map_request ( )
else :
lisp_send_ddt_map_request ( Ii1IIi1III1i , True )
if 19 - 19: ooOoO0o . iIii1I11I1II1 + I1ii11iIi11i + I1ii11iIi11i / o0oOOo0O0Ooo . Oo0Ooo
if 9 - 9: II111iiii % OoooooooOO
if 4 - 4: i1IIi * i11iIiiIii % OoooooooOO + OoOoOO00 . oO0o
if ( O0oo0oo0 == LISP_DDT_ACTION_MS_NOT_REG ) :
if ( I1IIiII1 . referral_set . has_key ( o00oOOO ) ) :
IiOO00O00 = I1IIiII1 . referral_set [ o00oOOO ]
IiOO00O00 . updown = False
if 95 - 95: I1ii11iIi11i * OoOoOO00 % o0oOOo0O0Ooo / O0 + ooOoO0o % OOooOOo
if ( len ( I1IIiII1 . referral_set ) == 0 ) :
Ii1IIi1III1i . dequeue_map_request ( )
else :
lisp_send_ddt_map_request ( Ii1IIi1III1i , False )
if 48 - 48: i1IIi + IiII - iIii1I11I1II1 . i11iIiiIii % OOooOOo + I1ii11iIi11i
if 95 - 95: ooOoO0o + OoOoOO00 . II111iiii + Ii1I
if 81 - 81: OoooooooOO / OOooOOo / Oo0Ooo
if ( O0oo0oo0 in ( LISP_DDT_ACTION_NODE_REFERRAL ,
LISP_DDT_ACTION_MS_REFERRAL ) ) :
if ( Ii1IIi1III1i . eid . is_exact_match ( IiII1iiI . eid ) ) :
if ( not Ii1IIi1III1i . tried_root ) :
lisp_send_ddt_map_request ( Ii1IIi1III1i , True )
else :
lisp_send_negative_map_reply ( Ii1IIi1III1i . lisp_sockets ,
I1IIiII1 . eid , I1IIiII1 . group , Ii1IIi1III1i . nonce , Ii1IIi1III1i . itr ,
Ii1IIi1III1i . sport , 15 , None , False )
Ii1IIi1III1i . dequeue_map_request ( )
if 26 - 26: iII111i
else :
lisp_send_ddt_map_request ( Ii1IIi1III1i , False )
if 93 - 93: Oo0Ooo + I1IiiI % OoOoOO00 / OOooOOo / I1ii11iIi11i
if 6 - 6: IiII
if 68 - 68: Oo0Ooo
if ( O0oo0oo0 == LISP_DDT_ACTION_MS_ACK ) : Ii1IIi1III1i . dequeue_map_request ( )
if 83 - 83: OOooOOo / iIii1I11I1II1 . OoO0O00 - oO0o % Oo0Ooo
return
if 30 - 30: Ii1I . OoOoOO00 / oO0o . OoO0O00
if 93 - 93: i11iIiiIii
if 33 - 33: i1IIi % OoooooooOO + Oo0Ooo % I1IiiI / ooOoO0o
if 40 - 40: IiII % IiII
if 9 - 9: I1IiiI * i1IIi + OOooOOo * OoOoOO00
if 8 - 8: iII111i
if 51 - 51: I1IiiI
if 72 - 72: ooOoO0o / I1ii11iIi11i . Ii1I * iII111i . iIii1I11I1II1
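#
# lisp_process_ecm() strips the Encapsulated-Control-Message header, checks
# that the inner message is a Map-Request, and passes it to
# lisp_process_map_request() with the inner source address and UDP port.
#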
def lisp_process_ecm ( lisp_sockets , packet , source , ecm_port ) :
Ii1I111Ii = lisp_ecm ( 0 )
packet = Ii1I111Ii . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode ECM packet" )
return
if 35 - 35: OoO0O00 . OoOoOO00 % O0 * OoO0O00
if 68 - 68: OOooOOo
Ii1I111Ii . print_ecm ( )
if 87 - 87: IiII * IiII - OoO0O00 / I1ii11iIi11i + OOooOOo / i11iIiiIii
oooooOOo0Oo = lisp_control_header ( )
if ( oooooOOo0Oo . decode ( packet ) == None ) :
lprint ( "Could not decode control header" )
return
if 21 - 21: o0oOOo0O0Ooo / oO0o + oO0o + Oo0Ooo / o0oOOo0O0Ooo
if 39 - 39: i11iIiiIii - OoO0O00 - i11iIiiIii / OoooooooOO
IiI1IIi = oooooOOo0Oo . type
del ( oooooOOo0Oo )
if 23 - 23: OOooOOo / OoOoOO00 / OoooooooOO + i1IIi % OoooooooOO
if ( IiI1IIi != LISP_MAP_REQUEST ) :
lprint ( "Received ECM without Map-Request inside" )
return
if 15 - 15: o0oOOo0O0Ooo % I1ii11iIi11i / II111iiii
if 50 - 50: oO0o * Ii1I % I1Ii111
if 74 - 74: iIii1I11I1II1 - OOooOOo / I1Ii111 / ooOoO0o . oO0o % iIii1I11I1II1
if 91 - 91: o0oOOo0O0Ooo . o0oOOo0O0Ooo - Ii1I
if 60 - 60: i11iIiiIii . Oo0Ooo / iIii1I11I1II1 / II111iiii
IIiIi1i1iiIiii = Ii1I111Ii . udp_sport
lisp_process_map_request ( lisp_sockets , packet , source , ecm_port ,
Ii1I111Ii . source , IIiIi1i1iiIiii , Ii1I111Ii . ddt , - 1 )
return
if 62 - 62: OoooooooOO % OoO0O00 * O0 + OOooOOo
if 34 - 34: O0 % Oo0Ooo . II111iiii % I1IiiI - iIii1I11I1II1
if 20 - 20: i11iIiiIii % I1IiiI % OoOoOO00
if 85 - 85: I11i + OoOoOO00 * O0 * O0
if 92 - 92: i11iIiiIii
if 16 - 16: I11i . ooOoO0o - Oo0Ooo / OoO0O00 . i1IIi
if 59 - 59: ooOoO0o - ooOoO0o % I11i + OoO0O00
if 88 - 88: Ii1I - ooOoO0o . Oo0Ooo
if 83 - 83: I11i + Oo0Ooo . I1ii11iIi11i * I1ii11iIi11i
if 80 - 80: i1IIi * I11i - OOooOOo / II111iiii * iIii1I11I1II1
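#
# lisp_send_map_register() computes the Map-Register authentication hash,
# optionally chacha-encrypts the body with the map-server's ekey, and sends
# the message to the map-server on the LISP control port (or to 127.0.0.1
# when bootstrapping a LISP-Decent multicast peer-group).
#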
def lisp_send_map_register ( lisp_sockets , packet , map_register , ms ) :
if 42 - 42: OoOoOO00 . I11i % II111iiii
if 19 - 19: OoooooooOO
if 31 - 31: I11i . OoOoOO00 - O0 * iII111i % I1Ii111 - II111iiii
if 21 - 21: OOooOOo . Oo0Ooo - i1IIi
if 56 - 56: I11i
if 24 - 24: I1IiiI . I1IiiI % ooOoO0o
if 32 - 32: OOooOOo / i1IIi / OOooOOo
iIi11i1I11Ii = ms . map_server
if ( lisp_decent_push_configured and iIi11i1I11Ii . is_multicast_address ( ) and
( ms . map_registers_multicast_sent == 1 or ms . map_registers_sent == 1 ) ) :
iIi11i1I11Ii = copy . deepcopy ( iIi11i1I11Ii )
iIi11i1I11Ii . address = 0x7f000001
II1Iii = bold ( "Bootstrap" , False )
o0 = ms . map_server . print_address_no_iid ( )
lprint ( "{} mapping system for peer-group {}" . format ( II1Iii , o0 ) )
if 97 - 97: ooOoO0o * Oo0Ooo * OoooooooOO * I1IiiI
if 45 - 45: Oo0Ooo
if 27 - 27: oO0o / IiII - iIii1I11I1II1 / o0oOOo0O0Ooo % OOooOOo * iIii1I11I1II1
if 40 - 40: oO0o - II111iiii * OOooOOo % OoooooooOO
if 52 - 52: OOooOOo + OoO0O00
if 96 - 96: OOooOOo % O0 - Oo0Ooo % oO0o / I1IiiI . i1IIi
packet = lisp_compute_auth ( packet , map_register , ms . password )
if 42 - 42: i1IIi
if 52 - 52: OoO0O00 % iII111i % O0
if 11 - 11: i1IIi / i11iIiiIii + Ii1I % Oo0Ooo % O0
if 50 - 50: oO0o . I1Ii111
if 38 - 38: iIii1I11I1II1 . Ii1I
if ( ms . ekey != None ) :
i1iIII1i = ms . ekey . zfill ( 32 )
Ii1IiiiI1ii = "0" * 8
Oooo0ooOoo0 = chacha . ChaCha ( i1iIII1i , Ii1IiiiI1ii ) . encrypt ( packet [ 4 : : ] )
packet = packet [ 0 : 4 ] + Oooo0ooOoo0
Oo0ooo0Ooo = bold ( "Encrypt" , False )
lprint ( "{} Map-Register with key-id {}" . format ( Oo0ooo0Ooo , ms . ekey_id ) )
if 82 - 82: OOooOOo * Ii1I + I1ii11iIi11i . OoO0O00
if 15 - 15: O0
I1i1Ii1I1 = ""
if ( lisp_decent_pull_xtr_configured ( ) ) :
I1i1Ii1I1 = ", decent-index {}" . format ( bold ( ms . dns_name , False ) )
if 50 - 50: IiII * oO0o
if 15 - 15: iIii1I11I1II1 / I1IiiI * i11iIiiIii
lprint ( "Send Map-Register to map-server {}{}{}" . format ( iIi11i1I11Ii . print_address ( ) , ", ms-name '{}'" . format ( ms . ms_name ) , I1i1Ii1I1 ) )
if 40 - 40: iIii1I11I1II1
lisp_send ( lisp_sockets , iIi11i1I11Ii , LISP_CTRL_PORT , packet )
return
if 71 - 71: I1Ii111 % oO0o . iII111i + OoOoOO00
if 29 - 29: oO0o % O0 - iIii1I11I1II1
if 94 - 94: Oo0Ooo - I11i + I1IiiI / o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 19 - 19: oO0o . o0oOOo0O0Ooo + IiII * Oo0Ooo / OOooOOo % oO0o
if 11 - 11: OoOoOO00 * Oo0Ooo / I11i * OOooOOo
if 15 - 15: ooOoO0o - OOooOOo / OoooooooOO
if 41 - 41: OoOoOO00 . iII111i . i1IIi + oO0o
if 60 - 60: oO0o * I1Ii111
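#
# lisp_send_ipc_to_core() wraps a control packet in an IPC header and hands
# it to the lisp-core process, which owns the LISP control port and performs
# the actual send.
#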
def lisp_send_ipc_to_core ( lisp_socket , packet , dest , port ) :
oo = lisp_socket . getsockname ( )
dest = dest . print_address_no_iid ( )
if 81 - 81: oO0o - OOooOOo - oO0o
lprint ( "Send IPC {} bytes to {} {}, control-packet: {}" . format ( len ( packet ) , dest , port , lisp_format_packet ( packet ) ) )
if 54 - 54: oO0o % I11i
if 71 - 71: oO0o / I1ii11iIi11i . Ii1I % II111iiii
packet = lisp_control_packet_ipc ( packet , oo , dest , port )
lisp_ipc ( packet , lisp_socket , "lisp-core-pkt" )
return
if 22 - 22: iIii1I11I1II1 - OoooooooOO
if 8 - 8: ooOoO0o % i11iIiiIii
if 41 - 41: I1Ii111 . ooOoO0o - i11iIiiIii + Ii1I . OOooOOo . OoOoOO00
if 70 - 70: i1IIi % OoOoOO00 / iII111i + i11iIiiIii % ooOoO0o + IiII
if 58 - 58: OOooOOo / i11iIiiIii . Oo0Ooo % iII111i
if 92 - 92: OoOoOO00 / ooOoO0o % iII111i / iIii1I11I1II1
if 73 - 73: O0 % i11iIiiIii
if 16 - 16: O0
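# lisp_send_map_reply() logs and forwards a Map-Reply to lisp-core for
# transmission.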
def lisp_send_map_reply ( lisp_sockets , packet , dest , port ) :
lprint ( "Send Map-Reply to {}" . format ( dest . print_address_no_iid ( ) ) )
lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
return
if 15 - 15: i1IIi % i11iIiiIii
if 18 - 18: Ii1I . OoO0O00 . iII111i * oO0o + O0
if 35 - 35: OoOoOO00 . oO0o / II111iiii
if 97 - 97: Ii1I + I1Ii111 / II111iiii
if 14 - 14: iII111i / IiII / oO0o
if 55 - 55: OoO0O00 % O0
if 92 - 92: OoooooooOO / O0
if 14 - 14: i11iIiiIii
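# lisp_send_map_referral() logs and forwards a Map-Referral to lisp-core for
# transmission.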
def lisp_send_map_referral ( lisp_sockets , packet , dest , port ) :
lprint ( "Send Map-Referral to {}" . format ( dest . print_address ( ) ) )
lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
return
if 43 - 43: OOooOOo
if 79 - 79: iII111i % Oo0Ooo . i1IIi % ooOoO0o
if 93 - 93: OoOoOO00
if 49 - 49: i1IIi * OOooOOo % I11i * Ii1I . I1Ii111 * iIii1I11I1II1
if 72 - 72: ooOoO0o
if 63 - 63: Oo0Ooo . OoO0O00 . OoooooooOO / i1IIi
if 53 - 53: OOooOOo * O0 . iII111i
if 3 - 3: OoooooooOO * I1Ii111 * IiII - OOooOOo * I1Ii111
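# lisp_send_map_notify() logs and forwards a Map-Notify to an xTR via
# lisp-core.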
def lisp_send_map_notify ( lisp_sockets , packet , dest , port ) :
lprint ( "Send Map-Notify to xTR {}" . format ( dest . print_address ( ) ) )
lisp_send_ipc_to_core ( lisp_sockets [ 2 ] , packet , dest , port )
return
if 78 - 78: iII111i
if 80 - 80: i1IIi * I1IiiI + OOooOOo
if 91 - 91: I1IiiI % OoOoOO00 * Oo0Ooo / I1ii11iIi11i
if 57 - 57: i11iIiiIii / o0oOOo0O0Ooo . II111iiii
if 63 - 63: O0
if 64 - 64: i11iIiiIii / oO0o . oO0o - Oo0Ooo
if 48 - 48: i1IIi + I1ii11iIi11i + I1Ii111 - iII111i
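#
# lisp_send_ecm() builds an ECM header around a control message (honoring a
# NAT-traversal translated source port and the to-etr/to-ms/ddt flags),
# prepends it to the packet, and sends the result to the outer destination on
# the LISP control port.
#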
def lisp_send_ecm ( lisp_sockets , packet , inner_source , inner_sport , inner_dest ,
outer_dest , to_etr = False , to_ms = False , ddt = False ) :
if 3 - 3: i1IIi + OoooooooOO * ooOoO0o + I1Ii111 % OOooOOo / IiII
if ( inner_source == None or inner_source . is_null ( ) ) :
inner_source = inner_dest
if 70 - 70: oO0o + i1IIi % o0oOOo0O0Ooo - I11i
if 74 - 74: i11iIiiIii
if 93 - 93: I1Ii111 % OOooOOo * I1IiiI % iII111i / iIii1I11I1II1 + OoO0O00
if 6 - 6: I11i
if 70 - 70: ooOoO0o + OoooooooOO % OoOoOO00 % oO0o / Ii1I . I11i
if 63 - 63: I1ii11iIi11i - ooOoO0o . OOooOOo / O0 . iIii1I11I1II1 - Ii1I
if ( lisp_nat_traversal ) :
O00o = lisp_get_any_translated_port ( )
if ( O00o != None ) : inner_sport = O00o
if 6 - 6: Ii1I
Ii1I111Ii = lisp_ecm ( inner_sport )
if 60 - 60: iII111i + I1IiiI
Ii1I111Ii . to_etr = to_etr if lisp_is_running ( "lisp-etr" ) else False
Ii1I111Ii . to_ms = to_ms if lisp_is_running ( "lisp-ms" ) else False
Ii1I111Ii . ddt = ddt
IiiI1Ii11i = Ii1I111Ii . encode ( packet , inner_source , inner_dest )
if ( IiiI1Ii11i == None ) :
lprint ( "Could not encode ECM message" )
return
if 11 - 11: o0oOOo0O0Ooo + iIii1I11I1II1 - OoooooooOO
Ii1I111Ii . print_ecm ( )
if 29 - 29: IiII
packet = IiiI1Ii11i + packet
if 22 - 22: I1IiiI * oO0o / Oo0Ooo
ooOOo0o = outer_dest . print_address_no_iid ( )
lprint ( "Send Encapsulated-Control-Message to {}" . format ( ooOOo0o ) )
iIi11i1I11Ii = lisp_convert_4to6 ( ooOOo0o )
lisp_send ( lisp_sockets , iIi11i1I11Ii , LISP_CTRL_PORT , packet )
return
if 40 - 40: I1ii11iIi11i . I1Ii111 / I1IiiI
if 60 - 60: I1IiiI % Ii1I / I1Ii111 + Ii1I
if 43 - 43: I1ii11iIi11i + I11i
if 83 - 83: II111iiii + o0oOOo0O0Ooo - I1Ii111
if 100 - 100: IiII - OoOoOO00 / I11i
if 33 - 33: I1Ii111 * OoOoOO00 . I1ii11iIi11i % I1Ii111
if 87 - 87: Oo0Ooo
LISP_AFI_GEO_COORD = - 3
LISP_AFI_IID_RANGE = - 2
LISP_AFI_ULTIMATE_ROOT = - 1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
if 65 - 65: ooOoO0o . I1IiiI
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
if 51 - 51: IiII
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
if 43 - 43: oO0o - I11i . i11iIiiIii
if 78 - 78: i11iIiiIii + Oo0Ooo * Ii1I - o0oOOo0O0Ooo % i11iIiiIii
if 30 - 30: I1IiiI % oO0o * OoooooooOO
if 64 - 64: I1IiiI
if 11 - 11: I1ii11iIi11i % iII111i / II111iiii % ooOoO0o % IiII
if 14 - 14: ooOoO0o / IiII . o0oOOo0O0Ooo
if 27 - 27: I1IiiI - OOooOOo . II111iiii * I1ii11iIi11i % ooOoO0o / I1IiiI
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
if 90 - 90: o0oOOo0O0Ooo / I1ii11iIi11i - oO0o - Ii1I - I1IiiI + I1Ii111
if 93 - 93: I1IiiI - I11i . I1IiiI - iIii1I11I1II1
if 1 - 1: O0 . Ii1I % Ii1I + II111iiii . oO0o
if 24 - 24: o0oOOo0O0Ooo . I1Ii111 % O0
if 67 - 67: I1IiiI * Ii1I
if 64 - 64: OOooOOo
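# byte_swap_64() reverses the byte order of a 64-bit integer; it is used when
# packing and unpacking IPv6 addresses as two 64-bit words. For example,
# byte_swap_64(0x0102030405060708) returns 0x0807060504030201.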
def byte_swap_64 ( address ) :
iIiIi1iI11iiI = ( ( address & 0x00000000000000ff ) << 56 ) | ( ( address & 0x000000000000ff00 ) << 40 ) | ( ( address & 0x0000000000ff0000 ) << 24 ) | ( ( address & 0x00000000ff000000 ) << 8 ) | ( ( address & 0x000000ff00000000 ) >> 8 ) | ( ( address & 0x0000ff0000000000 ) >> 24 ) | ( ( address & 0x00ff000000000000 ) >> 40 ) | ( ( address & 0xff00000000000000 ) >> 56 )
if 90 - 90: iII111i . OoOoOO00 + i1IIi % ooOoO0o * I11i + OoooooooOO
if 2 - 2: o0oOOo0O0Ooo . II111iiii
if 9 - 9: I1Ii111 - II111iiii + OoOoOO00 . OoO0O00
if 33 - 33: Oo0Ooo
if 12 - 12: i11iIiiIii . Oo0Ooo / OoOoOO00 + iII111i . Ii1I + ooOoO0o
if 66 - 66: IiII
if 41 - 41: II111iiii + Oo0Ooo / iII111i . IiII / iII111i / I1IiiI
if 78 - 78: o0oOOo0O0Ooo % OoOoOO00 . O0
return ( iIiIi1iI11iiI )
if 41 - 41: iIii1I11I1II1 . OOooOOo - Oo0Ooo % OOooOOo
if 90 - 90: i11iIiiIii + OoooooooOO - i11iIiiIii + OoooooooOO
if 23 - 23: i11iIiiIii - IiII - I1ii11iIi11i + I1ii11iIi11i % I1IiiI
if 79 - 79: II111iiii / OoooooooOO
if 35 - 35: i1IIi + IiII + II111iiii % OOooOOo
if 25 - 25: I11i + i11iIiiIii + O0 - Ii1I
if 69 - 69: I11i . OoOoOO00 / OOooOOo / i1IIi . II111iiii
if 17 - 17: I1Ii111
if 2 - 2: O0 % OoOoOO00 + oO0o
if 24 - 24: iII111i + iII111i - OoooooooOO % OoooooooOO * O0
if 51 - 51: IiII
if 31 - 31: I11i - iIii1I11I1II1 * Ii1I + Ii1I
if 10 - 10: OoOoOO00 - i11iIiiIii % iIii1I11I1II1 / ooOoO0o * i11iIiiIii - Ii1I
if 64 - 64: II111iiii . i11iIiiIii . iII111i . OOooOOo
if 95 - 95: O0 - OoOoOO00
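# lisp_cache_entries is a per-mask-length bucket: a dict of entries keyed by
# the string produced in lisp_cache.build_key(), plus a sorted list of those
# keys for ordered walks.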
class lisp_cache_entries ( ) :
def __init__ ( self ) :
self . entries = { }
self . entries_sorted = [ ]
if 68 - 68: ooOoO0o . I1Ii111
if 84 - 84: OoooooooOO + oO0o % i1IIi + o0oOOo0O0Ooo * i1IIi
if 51 - 51: oO0o . OoooooooOO + OOooOOo * I1ii11iIi11i - ooOoO0o
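#
# lisp_cache implements a longest-prefix-match cache keyed first by an
# adjusted mask length and then by an "instance-id + AFI + address" string.
# It backs the referral, DDT, registered-site, map-cache, and database
# tables instantiated below.
#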
class lisp_cache ( ) :
def __init__ ( self ) :
self . cache = { }
self . cache_sorted = [ ]
self . cache_count = 0
if 41 - 41: Oo0Ooo
if 46 - 46: i11iIiiIii + iIii1I11I1II1 . i11iIiiIii . iII111i
def cache_size ( self ) :
return ( self . cache_count )
if 66 - 66: oO0o % i1IIi % OoooooooOO
if 58 - 58: OOooOOo
def build_key ( self , prefix ) :
if ( prefix . afi == LISP_AFI_ULTIMATE_ROOT ) :
IIiiiII = 0
elif ( prefix . afi == LISP_AFI_IID_RANGE ) :
IIiiiII = prefix . mask_len
else :
IIiiiII = prefix . mask_len + 48
if 89 - 89: iIii1I11I1II1 - i1IIi
if 26 - 26: OOooOOo - iII111i * I1ii11iIi11i / iII111i
II1 = lisp_hex_string ( prefix . instance_id ) . zfill ( 8 )
o0o0O00oOo = lisp_hex_string ( prefix . afi ) . zfill ( 4 )
if 9 - 9: I1Ii111 / II111iiii * I1Ii111 / I11i - OoO0O00
if ( prefix . afi > 0 ) :
if ( prefix . is_binary ( ) ) :
OOOOO000oo0 = prefix . addr_length ( ) * 2
iIiIi1iI11iiI = lisp_hex_string ( prefix . address ) . zfill ( OOOOO000oo0 )
else :
iIiIi1iI11iiI = prefix . address
if 36 - 36: IiII . OoOoOO00 . Ii1I
elif ( prefix . afi == LISP_AFI_GEO_COORD ) :
o0o0O00oOo = "8003"
iIiIi1iI11iiI = prefix . address . print_geo ( )
else :
o0o0O00oOo = ""
iIiIi1iI11iiI = ""
if 31 - 31: iIii1I11I1II1
if 84 - 84: I1ii11iIi11i - iII111i * I1IiiI
Iiii11 = II1 + o0o0O00oOo + iIiIi1iI11iiI
return ( [ IIiiiII , Iiii11 ] )
if 88 - 88: OOooOOo / Oo0Ooo
if 31 - 31: II111iiii
def add_cache ( self , prefix , entry ) :
if ( prefix . is_binary ( ) ) : prefix . zero_host_bits ( )
IIiiiII , Iiii11 = self . build_key ( prefix )
if ( self . cache . has_key ( IIiiiII ) == False ) :
self . cache [ IIiiiII ] = lisp_cache_entries ( )
self . cache [ IIiiiII ] . entries = { }
self . cache [ IIiiiII ] . entries_sorted = [ ]
self . cache_sorted = sorted ( self . cache )
if 32 - 32: o0oOOo0O0Ooo % o0oOOo0O0Ooo
if ( self . cache [ IIiiiII ] . entries . has_key ( Iiii11 ) == False ) :
self . cache_count += 1
if 67 - 67: IiII + oO0o * IiII
self . cache [ IIiiiII ] . entries [ Iiii11 ] = entry
self . cache [ IIiiiII ] . entries_sorted = sorted ( self . cache [ IIiiiII ] . entries )
if 26 - 26: I1ii11iIi11i + i1IIi . i1IIi - oO0o + I1IiiI * o0oOOo0O0Ooo
if 62 - 62: ooOoO0o + ooOoO0o % I11i
def lookup_cache ( self , prefix , exact ) :
oooI111iiiii1I , Iiii11 = self . build_key ( prefix )
if ( exact ) :
if ( self . cache . has_key ( oooI111iiiii1I ) == False ) : return ( None )
if ( self . cache [ oooI111iiiii1I ] . entries . has_key ( Iiii11 ) == False ) : return ( None )
return ( self . cache [ oooI111iiiii1I ] . entries [ Iiii11 ] )
if 15 - 15: I1ii11iIi11i * iII111i + i11iIiiIii
if 68 - 68: i1IIi / oO0o * I1ii11iIi11i - OoOoOO00 + Oo0Ooo / O0
iIIi11Ii1iII = None
for IIiiiII in self . cache_sorted :
if ( oooI111iiiii1I < IIiiiII ) : return ( iIIi11Ii1iII )
for i1II1111 in self . cache [ IIiiiII ] . entries_sorted :
oO0 = self . cache [ IIiiiII ] . entries
if ( i1II1111 in oO0 ) :
iiIIIIiI111 = oO0 [ i1II1111 ]
if ( iiIIIIiI111 == None ) : continue
if ( prefix . is_more_specific ( iiIIIIiI111 . eid ) ) : iIIi11Ii1iII = iiIIIIiI111
if 78 - 78: O0 . Ii1I - I1ii11iIi11i
if 69 - 69: O0 % O0 . oO0o * OoooooooOO
if 13 - 13: i1IIi % oO0o . OoooooooOO + I1ii11iIi11i - OOooOOo
return ( iIIi11Ii1iII )
if 99 - 99: OoooooooOO % OOooOOo / I11i
if 77 - 77: II111iiii - IiII % OOooOOo
def delete_cache ( self , prefix ) :
IIiiiII , Iiii11 = self . build_key ( prefix )
if ( self . cache . has_key ( IIiiiII ) == False ) : return
if ( self . cache [ IIiiiII ] . entries . has_key ( Iiii11 ) == False ) : return
self . cache [ IIiiiII ] . entries . pop ( Iiii11 )
self . cache [ IIiiiII ] . entries_sorted . remove ( Iiii11 )
self . cache_count -= 1
if 22 - 22: OoooooooOO / oO0o
if 78 - 78: oO0o * I11i . i1IIi % i1IIi + i1IIi / OOooOOo
def walk_cache ( self , function , parms ) :
for IIiiiII in self . cache_sorted :
for Iiii11 in self . cache [ IIiiiII ] . entries_sorted :
iiIIIIiI111 = self . cache [ IIiiiII ] . entries [ Iiii11 ]
OooO000oo0o , parms = function ( iiIIIIiI111 , parms )
if ( OooO000oo0o == False ) : return ( parms )
if 50 - 50: OoO0O00 * O0 - IiII . o0oOOo0O0Ooo - iII111i
if 18 - 18: II111iiii * OoooooooOO - Oo0Ooo . iII111i - Oo0Ooo
return ( parms )
if 82 - 82: I1Ii111 . OoOoOO00 - iIii1I11I1II1 - OoO0O00
if 86 - 86: iIii1I11I1II1
def print_cache ( self ) :
lprint ( "Printing contents of {}: " . format ( self ) )
if ( self . cache_size ( ) == 0 ) :
lprint ( " Cache is empty" )
return
if 54 - 54: II111iiii
for IIiiiII in self . cache_sorted :
for Iiii11 in self . cache [ IIiiiII ] . entries_sorted :
iiIIIIiI111 = self . cache [ IIiiiII ] . entries [ Iiii11 ]
lprint ( " Mask-length: {}, key: {}, entry: {}" . format ( IIiiiII , Iiii11 ,
iiIIIIiI111 ) )
if 98 - 98: Oo0Ooo + IiII . Oo0Ooo / OoOoOO00 + O0
if 99 - 99: Oo0Ooo
if 42 - 42: I1IiiI + I1Ii111 - oO0o + o0oOOo0O0Ooo
if 86 - 86: Ii1I - o0oOOo0O0Ooo % iII111i
if 37 - 37: Oo0Ooo
if 87 - 87: I1ii11iIi11i . OoooooooOO . ooOoO0o + iIii1I11I1II1 + O0 % I1ii11iIi11i
if 53 - 53: IiII
if 96 - 96: Oo0Ooo . i11iIiiIii / Ii1I . I1ii11iIi11i % I1Ii111
lisp_referral_cache = lisp_cache ( )
lisp_ddt_cache = lisp_cache ( )
lisp_sites_by_eid = lisp_cache ( )
lisp_map_cache = lisp_cache ( )
lisp_db_for_lookups = lisp_cache ( )
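#
# Minimal usage sketch of the lisp_cache API above (editor's addition, not
# executed; the unicast case with a null group address is an assumption based
# on how the callers below use these tables):
#
#   eid = lisp_address ( LISP_AFI_IPV4 , "10.0.0.0" , 8 , 0 )
#   group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
#   entry = lisp_mapping ( eid , group , [ ] )
#   lisp_map_cache . add_cache ( eid , entry )
#   hit = lisp_map_cache . lookup_cache ( eid , False )   # longest match
#   hit = lisp_map_cache . lookup_cache ( eid , True )    # exact match
#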
if 68 - 68: ooOoO0o
if 58 - 58: iII111i * I1IiiI
if 82 - 82: Oo0Ooo / OoO0O00 % Oo0Ooo . ooOoO0o * O0
if 39 - 39: I1Ii111 * IiII
if 16 - 16: ooOoO0o + OoO0O00 / I11i * OoO0O00 . Oo0Ooo % OoOoOO00
if 65 - 65: Oo0Ooo / I1Ii111 % II111iiii % Ii1I
if 70 - 70: II111iiii % Oo0Ooo * oO0o
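#
# lisp_map_cache_lookup() does a longest-match lookup of the destination in
# the map-cache; for multicast destinations it then performs a source lookup
# inside the (*,G) entry to locate the (S,G) state.
#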
def lisp_map_cache_lookup ( source , dest ) :
if 54 - 54: O0 / ooOoO0o * I1Ii111
O0OOo0OO0oOo = dest . is_multicast_address ( )
if 5 - 5: Ii1I / OoOoOO00 - O0 * OoO0O00
if 13 - 13: IiII + Oo0Ooo - I1Ii111
if 10 - 10: OOooOOo % OoooooooOO / I1IiiI . II111iiii % iII111i
if 47 - 47: o0oOOo0O0Ooo . i11iIiiIii * i1IIi % I11i - ooOoO0o * oO0o
ooooOoo000O = lisp_map_cache . lookup_cache ( dest , False )
if ( ooooOoo000O == None ) :
oO00oo000O = source . print_sg ( dest ) if O0OOo0OO0oOo else dest . print_address ( )
oO00oo000O = green ( oO00oo000O , False )
dprint ( "Lookup for EID {} not found in map-cache" . format ( oO00oo000O ) )
return ( None )
if 95 - 95: oO0o / Ii1I + OoO0O00
if 57 - 57: iIii1I11I1II1 + I1Ii111 % oO0o - Ii1I . I1IiiI
if 39 - 39: OoO0O00 + II111iiii
if 98 - 98: O0 - I1Ii111 % oO0o - iII111i + Ii1I * i1IIi
if 76 - 76: o0oOOo0O0Ooo
if ( O0OOo0OO0oOo == False ) :
i1ii1I11iIII = green ( ooooOoo000O . eid . print_prefix ( ) , False )
dprint ( "Lookup for EID {} found map-cache entry {}" . format ( green ( dest . print_address ( ) , False ) , i1ii1I11iIII ) )
if 55 - 55: OOooOOo + I1ii11iIi11i * Oo0Ooo
return ( ooooOoo000O )
if 11 - 11: i1IIi - OoooooooOO * OoOoOO00 / oO0o - OoooooooOO - I1IiiI
if 22 - 22: i11iIiiIii . Ii1I . Oo0Ooo * Oo0Ooo - iII111i / I1ii11iIi11i
if 49 - 49: iII111i + I11i . Oo0Ooo
if 23 - 23: I1IiiI . Ii1I + ooOoO0o . OoooooooOO
if 57 - 57: OOooOOo / OoOoOO00 / i11iIiiIii - I11i - I11i . Ii1I
ooooOoo000O = ooooOoo000O . lookup_source_cache ( source , False )
if ( ooooOoo000O == None ) :
oO00oo000O = source . print_sg ( dest )
dprint ( "Lookup for EID {} not found in map-cache" . format ( oO00oo000O ) )
return ( None )
if 53 - 53: ooOoO0o . iII111i + Ii1I * I1Ii111
if 49 - 49: II111iiii . I1ii11iIi11i * OoOoOO00 - OOooOOo
if 48 - 48: OoO0O00 . iIii1I11I1II1 - OoooooooOO + I1Ii111 / i11iIiiIii . Oo0Ooo
if 61 - 61: II111iiii + OOooOOo . o0oOOo0O0Ooo . iIii1I11I1II1
if 63 - 63: I11i + i11iIiiIii . o0oOOo0O0Ooo . i1IIi + OoOoOO00
i1ii1I11iIII = green ( ooooOoo000O . print_eid_tuple ( ) , False )
dprint ( "Lookup for EID {} found map-cache entry {}" . format ( green ( source . print_sg ( dest ) , False ) , i1ii1I11iIII ) )
if 1 - 1: i11iIiiIii
return ( ooooOoo000O )
if 1 - 1: iIii1I11I1II1
if 73 - 73: iII111i + IiII
if 95 - 95: O0
if 75 - 75: ooOoO0o
if 8 - 8: O0 - OoooooooOO + I1ii11iIi11i / Oo0Ooo . oO0o + I1Ii111
if 85 - 85: ooOoO0o
if 29 - 29: iII111i . Ii1I
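#
# lisp_referral_cache_lookup() looks up (eid, group) in the referral cache.
# With a null group the EID is looked up directly; otherwise the group is
# looked up first and the source EID second, falling back to the group entry
# on longest-match lookups.
#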
def lisp_referral_cache_lookup ( eid , group , exact ) :
if ( group and group . is_null ( ) ) :
IiIIiIiI1II = lisp_referral_cache . lookup_cache ( eid , exact )
return ( IiIIiIiI1II )
if 43 - 43: I11i - I1ii11iIi11i + iIii1I11I1II1 / I1ii11iIi11i * oO0o / iIii1I11I1II1
if 45 - 45: IiII
if 49 - 49: I1IiiI . Ii1I * I1IiiI - OoooooooOO . I11i / I1Ii111
if 9 - 9: iIii1I11I1II1 * Ii1I / O0 - OOooOOo
if 95 - 95: i11iIiiIii * II111iiii * OOooOOo * iIii1I11I1II1
if ( eid == None or eid . is_null ( ) ) : return ( None )
if 22 - 22: iIii1I11I1II1 / I1IiiI + OoOoOO00 - OOooOOo . i11iIiiIii / i11iIiiIii
if 10 - 10: iIii1I11I1II1 % i1IIi
if 78 - 78: I11i + II111iiii % o0oOOo0O0Ooo
if 17 - 17: i11iIiiIii + oO0o * iII111i . II111iiii
if 44 - 44: I1ii11iIi11i
if 39 - 39: iII111i + Oo0Ooo / oO0o
IiIIiIiI1II = lisp_referral_cache . lookup_cache ( group , exact )
if ( IiIIiIiI1II == None ) : return ( None )
if 95 - 95: I1Ii111 * oO0o / ooOoO0o . Ii1I . OoOoOO00
ooo0oOooOO0o0 = IiIIiIiI1II . lookup_source_cache ( eid , exact )
if ( ooo0oOooOO0o0 ) : return ( ooo0oOooOO0o0 )
if 91 - 91: II111iiii + I11i + i1IIi
if ( exact ) : IiIIiIiI1II = None
return ( IiIIiIiI1II )
if 85 - 85: Ii1I * Ii1I . OoOoOO00 / Oo0Ooo
if 97 - 97: oO0o % iIii1I11I1II1
if 87 - 87: II111iiii % I1IiiI + oO0o - I11i / I11i
if 16 - 16: I1IiiI
if 39 - 39: ooOoO0o * II111iiii
if 90 - 90: OoooooooOO * ooOoO0o
if 14 - 14: I1IiiI % i1IIi
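#
# lisp_ddt_cache_lookup() applies the same two-stage (group, then source)
# lookup pattern against the DDT delegation cache.
#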
def lisp_ddt_cache_lookup ( eid , group , exact ) :
if ( group . is_null ( ) ) :
oO00oOo = lisp_ddt_cache . lookup_cache ( eid , exact )
return ( oO00oOo )
if 35 - 35: ooOoO0o % o0oOOo0O0Ooo % ooOoO0o
if 77 - 77: OOooOOo % I1Ii111 / i11iIiiIii . i1IIi % OOooOOo
if 55 - 55: i1IIi
if 64 - 64: oO0o . OOooOOo * i11iIiiIii + I1Ii111
if 88 - 88: O0
if ( eid . is_null ( ) ) : return ( None )
if 75 - 75: iII111i - Oo0Ooo / OoooooooOO - O0
if 36 - 36: OoO0O00 % Ii1I . Oo0Ooo
if 90 - 90: i11iIiiIii - iII111i * oO0o
if 79 - 79: IiII
if 38 - 38: I1Ii111
if 56 - 56: i11iIiiIii
oO00oOo = lisp_ddt_cache . lookup_cache ( group , exact )
if ( oO00oOo == None ) : return ( None )
if 58 - 58: i11iIiiIii / OoOoOO00
IIIiIII1II = oO00oOo . lookup_source_cache ( eid , exact )
if ( IIIiIII1II ) : return ( IIIiIII1II )
if 20 - 20: Oo0Ooo
if ( exact ) : oO00oOo = None
return ( oO00oOo )
if 45 - 45: iIii1I11I1II1 % O0 / I1IiiI . o0oOOo0O0Ooo * IiII
if 87 - 87: II111iiii / OoooooooOO * II111iiii % i11iIiiIii - ooOoO0o + II111iiii
if 39 - 39: I1Ii111
if 51 - 51: o0oOOo0O0Ooo * I11i
if 42 - 42: OOooOOo % I11i
if 84 - 84: Oo0Ooo * OoOoOO00 / Ii1I / IiII / o0oOOo0O0Ooo . I1ii11iIi11i
if 81 - 81: I1IiiI
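#
# lisp_site_eid_lookup() applies the same two-stage lookup against the
# registered-site cache, additionally walking up to a parent entry that has
# "accept-more-specifics" configured on longest-match lookups.
#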
def lisp_site_eid_lookup ( eid , group , exact ) :
if 82 - 82: I1Ii111 - OoooooooOO - Ii1I
if ( group . is_null ( ) ) :
ooO00oO0O = lisp_sites_by_eid . lookup_cache ( eid , exact )
return ( ooO00oO0O )
if 34 - 34: OOooOOo . iIii1I11I1II1 / I1IiiI . Oo0Ooo - iIii1I11I1II1
if 83 - 83: iII111i - I1ii11iIi11i + iII111i
if 4 - 4: o0oOOo0O0Ooo % iIii1I11I1II1 + I11i
if 60 - 60: I1ii11iIi11i / I1Ii111 % i11iIiiIii % oO0o % I1IiiI . Oo0Ooo
if 20 - 20: IiII - OOooOOo + OoOoOO00
if ( eid . is_null ( ) ) : return ( None )
if 83 - 83: OoooooooOO / I1IiiI + iII111i - iIii1I11I1II1 % ooOoO0o
if 74 - 74: OoO0O00
if 13 - 13: I1ii11iIi11i / OoO0O00
if 90 - 90: iIii1I11I1II1 - OoO0O00 . i1IIi / o0oOOo0O0Ooo + O0
if 94 - 94: IiII * i1IIi
if 90 - 90: O0 % I1IiiI . o0oOOo0O0Ooo % ooOoO0o % I1IiiI
ooO00oO0O = lisp_sites_by_eid . lookup_cache ( group , exact )
if ( ooO00oO0O == None ) : return ( None )
if 16 - 16: OoO0O00 / OOooOOo / iIii1I11I1II1 / OoooooooOO . oO0o - I1Ii111
if 43 - 43: OoOoOO00 % OOooOOo / I1IiiI + I1IiiI
if 40 - 40: OOooOOo . I1Ii111 + I1Ii111
if 4 - 4: iIii1I11I1II1 - iIii1I11I1II1 * I11i
if 32 - 32: I1IiiI + II111iiii * iII111i + O0 / O0 * Oo0Ooo
if 64 - 64: i11iIiiIii / iII111i + i11iIiiIii . I11i
if 66 - 66: i1IIi
if 98 - 98: Oo0Ooo / iIii1I11I1II1
if 33 - 33: O0 - iII111i
if 40 - 40: iII111i * I11i
if 25 - 25: O0 * o0oOOo0O0Ooo % ooOoO0o % I1IiiI
if 87 - 87: OoOoOO00
if 30 - 30: IiII % OoOoOO00 + I1Ii111
if 13 - 13: iII111i * Ii1I % o0oOOo0O0Ooo * i1IIi . IiII % i1IIi
if 79 - 79: OoooooooOO % I11i / o0oOOo0O0Ooo + IiII + O0 + iII111i
if 87 - 87: I11i
if 39 - 39: I1ii11iIi11i * i11iIiiIii % I1Ii111
if 72 - 72: OoO0O00 * Oo0Ooo - IiII
oOoO = ooO00oO0O . lookup_source_cache ( eid , exact )
if ( oOoO ) : return ( oOoO )
if 74 - 74: Ii1I
if ( exact ) :
ooO00oO0O = None
else :
OOooOo00Ooo = ooO00oO0O . parent_for_more_specifics
if ( OOooOo00Ooo and OOooOo00Ooo . accept_more_specifics ) :
if ( group . is_more_specific ( OOooOo00Ooo . group ) ) : ooO00oO0O = OOooOo00Ooo
if 26 - 26: I11i . O0
if 68 - 68: Ii1I
return ( ooO00oO0O )
if 26 - 26: o0oOOo0O0Ooo - I1ii11iIi11i / O0 % i11iIiiIii
if 7 - 7: I1Ii111 . Oo0Ooo + IiII / iIii1I11I1II1
if 22 - 22: iIii1I11I1II1 - O0 . iII111i - IiII - ooOoO0o
if 54 - 54: OoO0O00 . iII111i . OoOoOO00 * OoO0O00 + o0oOOo0O0Ooo . ooOoO0o
if 44 - 44: I11i * iIii1I11I1II1 . I1ii11iIi11i
if 9 - 9: o0oOOo0O0Ooo
if 23 - 23: ooOoO0o * OoO0O00 + O0 % I1Ii111
if 21 - 21: Ii1I * OoOoOO00
if 29 - 29: iIii1I11I1II1 / ooOoO0o
if 75 - 75: OoooooooOO + I1IiiI % OoOoOO00 / O0 - IiII
if 88 - 88: OoO0O00 % Ii1I
if 12 - 12: OoooooooOO . O0
if 33 - 33: OoooooooOO / I11i . II111iiii * i1IIi
if 34 - 34: i11iIiiIii / OoOoOO00
if 100 - 100: o0oOOo0O0Ooo - I1IiiI / I11i
if 43 - 43: o0oOOo0O0Ooo % iIii1I11I1II1
if 85 - 85: oO0o + OoooooooOO - IiII % o0oOOo0O0Ooo * ooOoO0o * II111iiii
if 4 - 4: Ii1I . i1IIi + Oo0Ooo % I11i . OoO0O00
if 70 - 70: OOooOOo * OoOoOO00 / OoOoOO00 / OoOoOO00
if 23 - 23: I1IiiI
if 24 - 24: I1Ii111 * i1IIi % O0 * Ii1I + iII111i
if 14 - 14: oO0o * iII111i + Ii1I + Ii1I * IiII
if 82 - 82: IiII * ooOoO0o / OOooOOo + OoOoOO00
if 32 - 32: IiII
if 90 - 90: I1ii11iIi11i / I11i * o0oOOo0O0Ooo % O0 * i11iIiiIii
if 68 - 68: I11i . Ii1I + I11i / IiII . I11i / iIii1I11I1II1
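#
# lisp_address is the generic address container used throughout this file: it
# stores an AFI, an integer-packed address, a mask length, and an instance-ID,
# and provides helpers for parsing, wire-format packing/unpacking, and
# classification (multicast, private, link-local, distinguished-name, ...).
#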
class lisp_address ( ) :
def __init__ ( self , afi , addr_str , mask_len , iid ) :
self . afi = afi
self . mask_len = mask_len
self . instance_id = iid
self . iid_list = [ ]
self . address = 0
if ( addr_str != "" ) : self . store_address ( addr_str )
if 96 - 96: O0
if 2 - 2: OoO0O00 / iII111i + o0oOOo0O0Ooo
def copy_address ( self , addr ) :
if ( addr == None ) : return
self . afi = addr . afi
self . address = addr . address
self . mask_len = addr . mask_len
self . instance_id = addr . instance_id
self . iid_list = addr . iid_list
if 27 - 27: I11i - OoOoOO00 - ooOoO0o - I1IiiI
if 51 - 51: I11i + I11i + O0 + O0 * I1Ii111
def make_default_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
self . mask_len = 0
self . address = 0
if 61 - 61: IiII . O0
if 38 - 38: Ii1I * I1ii11iIi11i - i11iIiiIii + ooOoO0o * I11i
def make_default_multicast_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
if ( self . afi == LISP_AFI_IPV4 ) :
self . address = 0xe0000000
self . mask_len = 4
if 74 - 74: OoOoOO00 . o0oOOo0O0Ooo
if ( self . afi == LISP_AFI_IPV6 ) :
self . address = 0xff << 120
self . mask_len = 8
if 40 - 40: ooOoO0o + I1ii11iIi11i * i11iIiiIii / i1IIi
if ( self . afi == LISP_AFI_MAC ) :
self . address = 0xffffffffffff
self . mask_len = 48
if 95 - 95: oO0o / IiII * II111iiii * Ii1I . OoO0O00 . OoO0O00
if 85 - 85: I1IiiI / II111iiii * OoO0O00 + ooOoO0o / OoO0O00 % OOooOOo
if 100 - 100: I1Ii111 % OoooooooOO % OoOoOO00 % I1IiiI
def not_set ( self ) :
return ( self . afi == LISP_AFI_NONE )
if 32 - 32: OoO0O00 + OOooOOo . OoO0O00 - Oo0Ooo
if 12 - 12: I1IiiI * OoO0O00 - II111iiii . i1IIi
def is_private_address ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
iIiIi1iI11iiI = self . address
if ( ( ( iIiIi1iI11iiI & 0xff000000 ) >> 24 ) == 10 ) : return ( True )
if ( ( ( iIiIi1iI11iiI & 0xff000000 ) >> 24 ) == 172 ) :
oOOo = ( iIiIi1iI11iiI & 0x00ff0000 ) >> 16
if ( oOOo >= 16 and oOOo <= 31 ) : return ( True )
if 88 - 88: I1ii11iIi11i * IiII - I1Ii111 / OoooooooOO
if ( ( ( iIiIi1iI11iiI & 0xffff0000 ) >> 16 ) == 0xc0a8 ) : return ( True )
return ( False )
if 99 - 99: o0oOOo0O0Ooo
if 34 - 34: ooOoO0o / OoooooooOO . OOooOOo . OoO0O00 . IiII / Ii1I
def is_multicast_address ( self ) :
if ( self . is_ipv4 ( ) ) : return ( self . is_ipv4_multicast ( ) )
if ( self . is_ipv6 ( ) ) : return ( self . is_ipv6_multicast ( ) )
if ( self . is_mac ( ) ) : return ( self . is_mac_multicast ( ) )
return ( False )
if 73 - 73: iII111i / iIii1I11I1II1
if 7 - 7: iII111i + OoOoOO00 - OoooooooOO % OoOoOO00 . oO0o * I1Ii111
def host_mask_len ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( LISP_IPV4_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_IPV6 ) : return ( LISP_IPV6_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_MAC ) : return ( LISP_MAC_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_E164 ) : return ( LISP_E164_HOST_MASK_LEN )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) * 8 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) * 8 )
if 82 - 82: iIii1I11I1II1 / oO0o * iII111i . OoOoOO00 + II111iiii
return ( 0 )
if 77 - 77: I1IiiI
if 9 - 9: i11iIiiIii + OOooOOo * OoO0O00
def is_iana_eid ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
iIiIi1iI11iiI = self . address >> 96
return ( iIiIi1iI11iiI == 0x20010005 )
if 9 - 9: OOooOOo
if 67 - 67: Oo0Ooo / I1Ii111 . ooOoO0o % oO0o / Oo0Ooo
def addr_length ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 16 )
if ( self . afi == LISP_AFI_MAC ) : return ( 6 )
if ( self . afi == LISP_AFI_E164 ) : return ( 8 )
if ( self . afi == LISP_AFI_LCAF ) : return ( 0 )
if ( self . afi == LISP_AFI_NAME ) : return ( len ( self . address ) + 1 )
if ( self . afi == LISP_AFI_IID_RANGE ) : return ( 4 )
if ( self . afi == LISP_AFI_GEO_COORD ) :
return ( len ( self . address . print_geo ( ) ) )
if 49 - 49: ooOoO0o + I1IiiI
return ( 0 )
if 70 - 70: o0oOOo0O0Ooo + Ii1I . OoO0O00 * Ii1I + OOooOOo + ooOoO0o
if 13 - 13: I1ii11iIi11i
def afi_to_version ( self ) :
if ( self . afi == LISP_AFI_IPV4 ) : return ( 4 )
if ( self . afi == LISP_AFI_IPV6 ) : return ( 6 )
return ( 0 )
if 97 - 97: oO0o - Oo0Ooo . i11iIiiIii % ooOoO0o * i11iIiiIii - OoooooooOO
if 44 - 44: I11i % OoooooooOO / iII111i - i11iIiiIii * i1IIi * o0oOOo0O0Ooo
def packet_format ( self ) :
if 51 - 51: Ii1I + IiII / I1ii11iIi11i + O0 % Ii1I
if 55 - 55: iII111i % o0oOOo0O0Ooo - oO0o % OoooooooOO
if 18 - 18: OoooooooOO - I1ii11iIi11i
if 94 - 94: OOooOOo . Oo0Ooo + Ii1I * o0oOOo0O0Ooo
if 79 - 79: OOooOOo + Oo0Ooo
if ( self . afi == LISP_AFI_IPV4 ) : return ( "I" )
if ( self . afi == LISP_AFI_IPV6 ) : return ( "QQ" )
if ( self . afi == LISP_AFI_MAC ) : return ( "HHH" )
if ( self . afi == LISP_AFI_E164 ) : return ( "II" )
if ( self . afi == LISP_AFI_LCAF ) : return ( "I" )
return ( "" )
if 33 - 33: iIii1I11I1II1
if 75 - 75: I1Ii111 / iIii1I11I1II1 . OoooooooOO
def pack_address ( self ) :
IIiI1I11ii1i = self . packet_format ( )
oOo = ""
if ( self . is_ipv4 ( ) ) :
oOo = struct . pack ( IIiI1I11ii1i , socket . htonl ( self . address ) )
elif ( self . is_ipv6 ( ) ) :
OOoO0oO00o = byte_swap_64 ( self . address >> 64 )
OOO0OoO0oo0OO = byte_swap_64 ( self . address & 0xffffffffffffffff )
oOo = struct . pack ( IIiI1I11ii1i , OOoO0oO00o , OOO0OoO0oo0OO )
elif ( self . is_mac ( ) ) :
iIiIi1iI11iiI = self . address
OOoO0oO00o = ( iIiIi1iI11iiI >> 32 ) & 0xffff
OOO0OoO0oo0OO = ( iIiIi1iI11iiI >> 16 ) & 0xffff
ooOoo = iIiIi1iI11iiI & 0xffff
oOo = struct . pack ( IIiI1I11ii1i , OOoO0oO00o , OOO0OoO0oo0OO , ooOoo )
elif ( self . is_e164 ( ) ) :
iIiIi1iI11iiI = self . address
OOoO0oO00o = ( iIiIi1iI11iiI >> 32 ) & 0xffffffff
OOO0OoO0oo0OO = ( iIiIi1iI11iiI & 0xffffffff )
oOo = struct . pack ( IIiI1I11ii1i , OOoO0oO00o , OOO0OoO0oo0OO )
elif ( self . is_dist_name ( ) ) :
oOo += self . address + "\0"
if 80 - 80: II111iiii . Oo0Ooo * oO0o % II111iiii / I1ii11iIi11i
return ( oOo )
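#
# Note on pack_address() above: the address is serialized with the
# AFI-specific struct format from packet_format(). IPv4 is one 32-bit
# word in network byte order ("I"), IPv6 is two byte-swapped 64-bit
# words ("QQ"), MAC is three 16-bit words ("HHH"), E.164 is two 32-bit
# words ("II"), and a distinguished-name is the raw string plus a NULL
# terminator. For example, 10.0.0.1 stored as integer 0x0a000001 packs
# to the 4 bytes 0a 00 00 01.
#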
if 66 - 66: iII111i / OoO0O00 / i11iIiiIii
if 99 - 99: OOooOOo
def unpack_address ( self , packet ) :
IIiI1I11ii1i = self . packet_format ( )
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 51 - 51: i11iIiiIii . o0oOOo0O0Ooo / iII111i
iIiIi1iI11iiI = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 53 - 53: oO0o / i1IIi - Oo0Ooo - i1IIi + IiII
if ( self . is_ipv4 ( ) ) :
self . address = socket . ntohl ( iIiIi1iI11iiI [ 0 ] )
if 79 - 79: oO0o % o0oOOo0O0Ooo / o0oOOo0O0Ooo % iII111i
elif ( self . is_ipv6 ( ) ) :
if ( iIiIi1iI11iiI [ 0 ] <= 0xffff and ( iIiIi1iI11iiI [ 0 ] & 0xff ) == 0 ) :
OO0o = ( iIiIi1iI11iiI [ 0 ] << 48 ) << 64
else :
OO0o = byte_swap_64 ( iIiIi1iI11iiI [ 0 ] ) << 64
if 67 - 67: II111iiii
ii11 = byte_swap_64 ( iIiIi1iI11iiI [ 1 ] )
self . address = OO0o | ii11
if 50 - 50: o0oOOo0O0Ooo . iIii1I11I1II1 % o0oOOo0O0Ooo
elif ( self . is_mac ( ) ) :
iIii = iIiIi1iI11iiI [ 0 ]
iIiII = iIiIi1iI11iiI [ 1 ]
iiii111I1I = iIiIi1iI11iiI [ 2 ]
self . address = ( iIii << 32 ) + ( iIiII << 16 ) + iiii111I1I
if 78 - 78: oO0o . o0oOOo0O0Ooo - OOooOOo + OoooooooOO % OOooOOo
elif ( self . is_e164 ( ) ) :
self . address = ( iIiIi1iI11iiI [ 0 ] << 32 ) + iIiIi1iI11iiI [ 1 ]
if 27 - 27: OOooOOo * O0 * i11iIiiIii / OoOoOO00 - i1IIi
elif ( self . is_dist_name ( ) ) :
packet , self . address = lisp_decode_dist_name ( packet )
self . mask_len = len ( self . address ) * 8
i1II1i1iiI1 = 0
if 73 - 73: iII111i / I1IiiI * ooOoO0o
packet = packet [ i1II1i1iiI1 : : ]
return ( packet )
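#
# Note on unpack_address() above: it is the inverse of pack_address().
# The IPv6 case special-cases a first unpacked 64-bit word that is
# <= 0xffff with a zero low byte, shifting it into the high-order 16
# bits instead of byte-swapping it; all other values go through
# byte_swap_64(). Distinguished-names are decoded with
# lisp_decode_dist_name() and set mask_len to 8 bits per character.
#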
if 85 - 85: I11i + I11i + oO0o - OoOoOO00
if 15 - 15: OoO0O00
def is_ipv4 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV4 ) else False )
if 88 - 88: Ii1I % i1IIi / I1Ii111
if 2 - 2: Ii1I . IiII % OoOoOO00
def is_ipv4_link_local ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 16 ) & 0xffff ) == 0xa9fe )
if 42 - 42: OoOoOO00 * OoO0O00 * IiII - IiII % Oo0Ooo . IiII
if 38 - 38: I1Ii111 . IiII - ooOoO0o . i11iIiiIii
def is_ipv4_loopback ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( self . address == 0x7f000001 )
if 35 - 35: i11iIiiIii
if 62 - 62: O0 - o0oOOo0O0Ooo + I1Ii111 * I1ii11iIi11i / OOooOOo
def is_ipv4_multicast ( self ) :
if ( self . is_ipv4 ( ) == False ) : return ( False )
return ( ( ( self . address >> 24 ) & 0xf0 ) == 0xe0 )
if 87 - 87: Oo0Ooo / OoooooooOO + O0 / o0oOOo0O0Ooo % II111iiii - O0
if 63 - 63: OOooOOo - OoO0O00 * i1IIi - I1ii11iIi11i . I1IiiI
def is_ipv4_string ( self , addr_str ) :
return ( addr_str . find ( "." ) != - 1 )
if 59 - 59: i11iIiiIii . OOooOOo % Oo0Ooo + O0
if 84 - 84: I1Ii111 / O0 - IiII . I11i / o0oOOo0O0Ooo
def is_ipv6 ( self ) :
return ( True if ( self . afi == LISP_AFI_IPV6 ) else False )
if 12 - 12: i11iIiiIii / Ii1I + i1IIi
if 54 - 54: I1IiiI
def is_ipv6_link_local ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 112 ) & 0xffff ) == 0xfe80 )
if 55 - 55: I1ii11iIi11i % IiII % o0oOOo0O0Ooo + i1IIi * OoooooooOO % II111iiii
if 37 - 37: Oo0Ooo
def is_ipv6_string_link_local ( self , addr_str ) :
return ( addr_str . find ( "fe80::" ) != - 1 )
if 33 - 33: OoooooooOO - O0 . O0 - o0oOOo0O0Ooo % o0oOOo0O0Ooo % OoO0O00
if 27 - 27: ooOoO0o . i11iIiiIii / o0oOOo0O0Ooo * OoO0O00 * OoOoOO00 * oO0o
def is_ipv6_loopback ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( self . address == 1 )
if 19 - 19: O0 * II111iiii * OoOoOO00
if 53 - 53: Oo0Ooo
def is_ipv6_multicast ( self ) :
if ( self . is_ipv6 ( ) == False ) : return ( False )
return ( ( ( self . address >> 120 ) & 0xff ) == 0xff )
if 16 - 16: Ii1I
if 73 - 73: i11iIiiIii + I1IiiI - IiII - IiII + IiII . Ii1I
def is_ipv6_string ( self , addr_str ) :
return ( addr_str . find ( ":" ) != - 1 )
if 78 - 78: OoO0O00 + oO0o
if 86 - 86: ooOoO0o . ooOoO0o + oO0o
def is_mac ( self ) :
return ( True if ( self . afi == LISP_AFI_MAC ) else False )
if 84 - 84: OOooOOo - OoOoOO00 + i1IIi * I1ii11iIi11i % I1ii11iIi11i * I1Ii111
if 31 - 31: IiII + iII111i
def is_mac_multicast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( ( self . address & 0x010000000000 ) != 0 )
if 5 - 5: O0 * Ii1I
if 78 - 78: iII111i * iIii1I11I1II1 . OoO0O00 . OoOoOO00 % I1Ii111
def is_mac_broadcast ( self ) :
if ( self . is_mac ( ) == False ) : return ( False )
return ( self . address == 0xffffffffffff )
if 77 - 77: OOooOOo / OoooooooOO
if 11 - 11: iIii1I11I1II1 - Ii1I - OoOoOO00 . oO0o / I1ii11iIi11i
def is_mac_string ( self , addr_str ) :
return ( len ( addr_str ) == 15 and addr_str . find ( "-" ) != - 1 )
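#
# The is_*_string() tests above classify an address string purely by
# punctuation: a "." means IPv4, a ":" means IPv6, and a 15-character
# string containing "-" is treated as a MAC address string.
#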
if 79 - 79: i11iIiiIii % o0oOOo0O0Ooo * II111iiii . i1IIi * Ii1I - i11iIiiIii
if 31 - 31: IiII / o0oOOo0O0Ooo
def is_link_local_multicast ( self ) :
if ( self . is_ipv4 ( ) ) :
return ( ( 0xe0ffff00 & self . address ) == 0xe0000000 )
if 27 - 27: Oo0Ooo
if ( self . is_ipv6 ( ) ) :
return ( ( self . address >> 112 ) & 0xffff == 0xff02 )
if 32 - 32: Oo0Ooo * i11iIiiIii % I1IiiI - i11iIiiIii - I1Ii111 % I1ii11iIi11i
return ( False )
if 35 - 35: o0oOOo0O0Ooo % iII111i / O0 * I1IiiI . o0oOOo0O0Ooo / OOooOOo
if 81 - 81: I1ii11iIi11i - i11iIiiIii
def is_null ( self ) :
return ( True if ( self . afi == LISP_AFI_NONE ) else False )
if 49 - 49: iII111i * I11i - II111iiii . o0oOOo0O0Ooo
if 52 - 52: Ii1I + Ii1I - II111iiii . O0 + I1ii11iIi11i
def is_ultimate_root ( self ) :
return ( True if self . afi == LISP_AFI_ULTIMATE_ROOT else False )
if 60 - 60: i11iIiiIii + IiII
if 41 - 41: I1Ii111 * o0oOOo0O0Ooo + Oo0Ooo
def is_iid_range ( self ) :
return ( True if self . afi == LISP_AFI_IID_RANGE else False )
if 86 - 86: Ii1I / oO0o
if 40 - 40: OoO0O00 % oO0o + Oo0Ooo
def is_e164 ( self ) :
return ( True if ( self . afi == LISP_AFI_E164 ) else False )
if 60 - 60: II111iiii / Ii1I
if 14 - 14: iII111i - Oo0Ooo / o0oOOo0O0Ooo * oO0o / Oo0Ooo - I1IiiI
def is_dist_name ( self ) :
return ( True if ( self . afi == LISP_AFI_NAME ) else False )
if 89 - 89: i1IIi / I1Ii111 + Ii1I - i1IIi
if 66 - 66: OoooooooOO
def is_geo_prefix ( self ) :
return ( True if ( self . afi == LISP_AFI_GEO_COORD ) else False )
if 68 - 68: iII111i + I1Ii111
if 90 - 90: o0oOOo0O0Ooo
def is_binary ( self ) :
if ( self . is_dist_name ( ) ) : return ( False )
if ( self . is_geo_prefix ( ) ) : return ( False )
return ( True )
if 48 - 48: iII111i + Ii1I
if 45 - 45: oO0o / iIii1I11I1II1 % O0 % IiII % I1ii11iIi11i
def store_address ( self , addr_str ) :
if ( self . afi == LISP_AFI_NONE ) : self . string_to_afi ( addr_str )
II11iIII1i1I = addr_str . find ( "[" )
o0000o0O0ooo = addr_str . find ( "]" )
if ( II11iIII1i1I != - 1 and o0000o0O0ooo != - 1 ) :
self . instance_id = int ( addr_str [ II11iIII1i1I + 1 : o0000o0O0ooo ] )
addr_str = addr_str [ o0000o0O0ooo + 1 : : ]
if ( self . is_dist_name ( ) == False ) :
addr_str = addr_str . replace ( " " , "" )
if ( self . is_ipv4 ( ) ) :
o0o0OoO0O00OO = addr_str . split ( "." )
ooOo0O0O0oOO0 = int ( o0o0OoO0O00OO [ 0 ] ) << 24
ooOo0O0O0oOO0 += int ( o0o0OoO0O00OO [ 1 ] ) << 16
ooOo0O0O0oOO0 += int ( o0o0OoO0O00OO [ 2 ] ) << 8
ooOo0O0O0oOO0 += int ( o0o0OoO0O00OO [ 3 ] )
self . address = ooOo0O0O0oOO0
elif ( self . is_ipv6 ( ) ) :
OoOo0Ooo0Oooo = ( addr_str [ 2 : 4 ] == "::" )
try :
addr_str = socket . inet_pton ( socket . AF_INET6 , addr_str )
except :
addr_str = socket . inet_pton ( socket . AF_INET6 , "0::0" )
if 37 - 37: OoooooooOO * I1IiiI - I1ii11iIi11i
addr_str = binascii . hexlify ( addr_str )
if 37 - 37: OoooooooOO - OoOoOO00 . I1IiiI * oO0o - Oo0Ooo + I1IiiI
if ( OoOo0Ooo0Oooo ) :
addr_str = addr_str [ 2 : 4 ] + addr_str [ 0 : 2 ] + addr_str [ 4 : : ]
if 17 - 17: OOooOOo % I1IiiI - o0oOOo0O0Ooo + OoO0O00 + OoOoOO00 + i1IIi
self . address = int ( addr_str , 16 )
if 74 - 74: iIii1I11I1II1
elif ( self . is_geo_prefix ( ) ) :
O0OOoo = lisp_geo ( None )
O0OOoo . name = "geo-prefix-{}" . format ( O0OOoo )
O0OOoo . parse_geo_string ( addr_str )
self . address = O0OOoo
elif ( self . is_mac ( ) ) :
addr_str = addr_str . replace ( "-" , "" )
ooOo0O0O0oOO0 = int ( addr_str , 16 )
self . address = ooOo0O0O0oOO0
elif ( self . is_e164 ( ) ) :
addr_str = addr_str [ 1 : : ]
ooOo0O0O0oOO0 = int ( addr_str , 16 )
self . address = ooOo0O0O0oOO0 << 4
elif ( self . is_dist_name ( ) ) :
self . address = addr_str . replace ( "'" , "" )
if 8 - 8: OOooOOo % o0oOOo0O0Ooo
self . mask_len = self . host_mask_len ( )
if 36 - 36: Ii1I % OoooooooOO
if 31 - 31: Ii1I / Ii1I / Ii1I / o0oOOo0O0Ooo / I11i
def store_prefix ( self , prefix_str ) :
if ( self . is_geo_string ( prefix_str ) ) :
oo0OOo0O = prefix_str . find ( "]" )
Ooo = len ( prefix_str [ oo0OOo0O + 1 : : ] ) * 8
elif ( prefix_str . find ( "/" ) != - 1 ) :
prefix_str , Ooo = prefix_str . split ( "/" )
else :
i1II1I = prefix_str . find ( "'" )
if ( i1II1I == - 1 ) : return
iiiI111I = prefix_str . find ( "'" , i1II1I + 1 )
if ( iiiI111I == - 1 ) : return
Ooo = len ( prefix_str [ i1II1I + 1 : iiiI111I ] ) * 8
if 24 - 24: i1IIi - Oo0Ooo % Oo0Ooo
if 29 - 29: IiII
self . string_to_afi ( prefix_str )
self . store_address ( prefix_str )
self . mask_len = int ( Ooo )
if 94 - 94: I1IiiI * Oo0Ooo * OOooOOo + Oo0Ooo / I1Ii111
if 3 - 3: I11i * iII111i - OoooooooOO % OoOoOO00 % ooOoO0o
def zero_host_bits ( self ) :
if ( self . mask_len < 0 ) : return
iii1iiiiiiI1 = ( 2 ** self . mask_len ) - 1
iIiiI1i11Ii = self . addr_length ( ) * 8 - self . mask_len
iii1iiiiiiI1 <<= iIiiI1i11Ii
self . address &= iii1iiiiiiI1
if 14 - 14: iIii1I11I1II1 % i1IIi / I1IiiI + I1IiiI . iII111i
if 40 - 40: I1ii11iIi11i + Ii1I % OOooOOo * oO0o
def is_geo_string ( self , addr_str ) :
oo0OOo0O = addr_str . find ( "]" )
if ( oo0OOo0O != - 1 ) : addr_str = addr_str [ oo0OOo0O + 1 : : ]
if 77 - 77: OoooooooOO
O0OOoo = addr_str . split ( "/" )
if ( len ( O0OOoo ) == 2 ) :
if ( O0OOoo [ 1 ] . isdigit ( ) == False ) : return ( False )
if 54 - 54: I11i * Oo0Ooo
O0OOoo = O0OOoo [ 0 ]
O0OOoo = O0OOoo . split ( "-" )
I1i11I = len ( O0OOoo )
if ( I1i11I < 8 or I1i11I > 9 ) : return ( False )
if 19 - 19: IiII
for ii1iiII in range ( 0 , I1i11I ) :
if ( ii1iiII == 3 ) :
if ( O0OOoo [ ii1iiII ] in [ "N" , "S" ] ) : continue
return ( False )
if 99 - 99: oO0o + Oo0Ooo . IiII * I1IiiI
if ( ii1iiII == 7 ) :
if ( O0OOoo [ ii1iiII ] in [ "W" , "E" ] ) : continue
return ( False )
if 29 - 29: i11iIiiIii - oO0o - oO0o + I11i . OOooOOo . OoO0O00
if ( O0OOoo [ ii1iiII ] . isdigit ( ) == False ) : return ( False )
if 94 - 94: oO0o - o0oOOo0O0Ooo / I1ii11iIi11i . IiII - II111iiii - ooOoO0o
return ( True )
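#
# is_geo_string() above accepts geo-prefixes of the form
# "deg-min-sec-[N|S]-deg-min-sec-[W|E]" with an optional ninth
# "-altitude" element and an optional "/radius" suffix, for example
# "37-25-19-N-122-5-31-W/50". parse_geo_string() in class lisp_geo
# below parses the same syntax.
#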
if 92 - 92: OoooooooOO + O0 * OOooOOo
if 1 - 1: O0
def string_to_afi ( self , addr_str ) :
if ( addr_str . count ( "'" ) == 2 ) :
self . afi = LISP_AFI_NAME
return
if 34 - 34: o0oOOo0O0Ooo * i1IIi + I1Ii111
if ( addr_str . find ( ":" ) != - 1 ) : self . afi = LISP_AFI_IPV6
elif ( addr_str . find ( "." ) != - 1 ) : self . afi = LISP_AFI_IPV4
elif ( addr_str . find ( "+" ) != - 1 ) : self . afi = LISP_AFI_E164
elif ( self . is_geo_string ( addr_str ) ) : self . afi = LISP_AFI_GEO_COORD
elif ( addr_str . find ( "-" ) != - 1 ) : self . afi = LISP_AFI_MAC
else : self . afi = LISP_AFI_NONE
if 46 - 46: IiII / i11iIiiIii
if 51 - 51: OoO0O00 - OoO0O00 + o0oOOo0O0Ooo * iII111i % II111iiii
def print_address ( self ) :
iIiIi1iI11iiI = self . print_address_no_iid ( )
II1 = "[" + str ( self . instance_id )
for II11iIII1i1I in self . iid_list : II1 += "," + str ( II11iIII1i1I )
II1 += "]"
iIiIi1iI11iiI = "{}{}" . format ( II1 , iIiIi1iI11iiI )
return ( iIiIi1iI11iiI )
if 7 - 7: O0 * OoO0O00 % IiII
if 76 - 76: iII111i - i1IIi
def print_address_no_iid ( self ) :
if ( self . is_ipv4 ( ) ) :
iIiIi1iI11iiI = self . address
o00oOO = iIiIi1iI11iiI >> 24
iiIi = ( iIiIi1iI11iiI >> 16 ) & 0xff
iI11III1 = ( iIiIi1iI11iiI >> 8 ) & 0xff
oooO0 = iIiIi1iI11iiI & 0xff
return ( "{}.{}.{}.{}" . format ( o00oOO , iiIi , iI11III1 , oooO0 ) )
elif ( self . is_ipv6 ( ) ) :
ooOOo0o = lisp_hex_string ( self . address ) . zfill ( 32 )
ooOOo0o = binascii . unhexlify ( ooOOo0o )
ooOOo0o = socket . inet_ntop ( socket . AF_INET6 , ooOOo0o )
return ( "{}" . format ( ooOOo0o ) )
elif ( self . is_geo_prefix ( ) ) :
return ( "{}" . format ( self . address . print_geo ( ) ) )
elif ( self . is_mac ( ) ) :
ooOOo0o = lisp_hex_string ( self . address ) . zfill ( 12 )
ooOOo0o = "{}-{}-{}" . format ( ooOOo0o [ 0 : 4 ] , ooOOo0o [ 4 : 8 ] ,
ooOOo0o [ 8 : 12 ] )
return ( "{}" . format ( ooOOo0o ) )
elif ( self . is_e164 ( ) ) :
ooOOo0o = lisp_hex_string ( self . address ) . zfill ( 15 )
return ( "+{}" . format ( ooOOo0o ) )
elif ( self . is_dist_name ( ) ) :
return ( "'{}'" . format ( self . address ) )
elif ( self . is_null ( ) ) :
return ( "no-address" )
if 17 - 17: I1IiiI
return ( "unknown-afi:{}" . format ( self . afi ) )
if 87 - 87: OoO0O00 + Ii1I - IiII % i11iIiiIii . OOooOOo / IiII
if 73 - 73: iIii1I11I1II1 - ooOoO0o . II111iiii % O0 + I1IiiI
def print_prefix ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "[*]" )
if ( self . is_iid_range ( ) ) :
if ( self . mask_len == 32 ) : return ( "[{}]" . format ( self . instance_id ) )
OOooOOoOoo = self . instance_id + ( 2 ** ( 32 - self . mask_len ) - 1 )
return ( "[{}-{}]" . format ( self . instance_id , OOooOOoOoo ) )
if 81 - 81: OOooOOo . OOooOOo
iIiIi1iI11iiI = self . print_address ( )
if ( self . is_dist_name ( ) ) : return ( iIiIi1iI11iiI )
if ( self . is_geo_prefix ( ) ) : return ( iIiIi1iI11iiI )
if 70 - 70: I1IiiI / I11i - II111iiii . o0oOOo0O0Ooo / O0
oo0OOo0O = iIiIi1iI11iiI . find ( "no-address" )
if ( oo0OOo0O == - 1 ) :
iIiIi1iI11iiI = "{}/{}" . format ( iIiIi1iI11iiI , str ( self . mask_len ) )
else :
iIiIi1iI11iiI = iIiIi1iI11iiI [ 0 : oo0OOo0O ]
if 29 - 29: OOooOOo . OOooOOo * iII111i % OoO0O00
return ( iIiIi1iI11iiI )
if 66 - 66: Ii1I / OoO0O00 * i11iIiiIii * oO0o . iIii1I11I1II1
if 16 - 16: Oo0Ooo % IiII * o0oOOo0O0Ooo % OoOoOO00 - OoooooooOO
def print_prefix_no_iid ( self ) :
iIiIi1iI11iiI = self . print_address_no_iid ( )
if ( self . is_dist_name ( ) ) : return ( iIiIi1iI11iiI )
if ( self . is_geo_prefix ( ) ) : return ( iIiIi1iI11iiI )
return ( "{}/{}" . format ( iIiIi1iI11iiI , str ( self . mask_len ) ) )
if 61 - 61: i11iIiiIii - i1IIi + iIii1I11I1II1 * I1IiiI % OoOoOO00 . oO0o
if 24 - 24: iII111i . i1IIi * I1ii11iIi11i
def print_prefix_url ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "0--0" )
iIiIi1iI11iiI = self . print_address ( )
oo0OOo0O = iIiIi1iI11iiI . find ( "]" )
if ( oo0OOo0O != - 1 ) : iIiIi1iI11iiI = iIiIi1iI11iiI [ oo0OOo0O + 1 : : ]
if ( self . is_geo_prefix ( ) ) :
iIiIi1iI11iiI = iIiIi1iI11iiI . replace ( "/" , "-" )
return ( "{}-{}" . format ( self . instance_id , iIiIi1iI11iiI ) )
if 1 - 1: oO0o / OoOoOO00 + I1IiiI
return ( "{}-{}-{}" . format ( self . instance_id , iIiIi1iI11iiI , self . mask_len ) )
if 47 - 47: O0 / OOooOOo . i1IIi / OoooooooOO . IiII
if 34 - 34: OoO0O00 * II111iiii + I1Ii111
def print_sg ( self , g ) :
o00oOOO = self . print_prefix ( )
IiiiI1 = o00oOOO . find ( "]" ) + 1
g = g . print_prefix ( )
iiI1i1I1I = g . find ( "]" ) + 1
o0o = "[{}]({}, {})" . format ( self . instance_id , o00oOOO [ IiiiI1 : : ] , g [ iiI1i1I1I : : ] )
return ( o0o )
if 66 - 66: i1IIi - Oo0Ooo
if 39 - 39: I11i * O0 + OoO0O00
def hash_address ( self , addr ) :
OOoO0oO00o = self . address
OOO0OoO0oo0OO = addr . address
if 42 - 42: O0 / I1IiiI * Ii1I / iIii1I11I1II1 . i1IIi / I1IiiI
if ( self . is_geo_prefix ( ) ) : OOoO0oO00o = self . address . print_geo ( )
if ( addr . is_geo_prefix ( ) ) : OOO0OoO0oo0OO = addr . address . print_geo ( )
if 66 - 66: I1ii11iIi11i % I1ii11iIi11i % I1ii11iIi11i % ooOoO0o + OoOoOO00
if ( type ( OOoO0oO00o ) == str ) :
OOoO0oO00o = int ( binascii . hexlify ( OOoO0oO00o [ 0 : 1 ] ) )
if 55 - 55: OoooooooOO / OoOoOO00 % Oo0Ooo * OoO0O00 . OoooooooOO . OOooOOo
if ( type ( OOO0OoO0oo0OO ) == str ) :
OOO0OoO0oo0OO = int ( binascii . hexlify ( OOO0OoO0oo0OO [ 0 : 1 ] ) )
if 79 - 79: i11iIiiIii / ooOoO0o / i11iIiiIii - I1Ii111
return ( OOoO0oO00o ^ OOO0OoO0oo0OO )
def is_more_specific ( self , prefix ) :
if ( prefix . afi == LISP_AFI_ULTIMATE_ROOT ) : return ( True )
if 10 - 10: I11i . ooOoO0o + I11i * Ii1I
Ooo = prefix . mask_len
if ( prefix . afi == LISP_AFI_IID_RANGE ) :
O0O0O = 2 ** ( 32 - Ooo )
iI1i11II = prefix . instance_id
OOooOOoOoo = iI1i11II + O0O0O
return ( self . instance_id in range ( iI1i11II , OOooOOoOoo ) )
if 7 - 7: oO0o - I11i / OoOoOO00 * I1Ii111 - Ii1I - i11iIiiIii
if 57 - 57: IiII % i1IIi
if ( self . instance_id != prefix . instance_id ) : return ( False )
if ( self . afi != prefix . afi ) :
if ( prefix . afi != LISP_AFI_NONE ) : return ( False )
if ( self . is_binary ( ) == False ) :
if ( prefix . afi == LISP_AFI_NONE ) : return ( True )
if ( type ( self . address ) != type ( prefix . address ) ) : return ( False )
iIiIi1iI11iiI = self . address
O00o0Oo = prefix . address
if ( self . is_geo_prefix ( ) ) :
iIiIi1iI11iiI = self . address . print_geo ( )
O00o0Oo = prefix . address . print_geo ( )
if 56 - 56: I1Ii111 * i1IIi % i11iIiiIii
if ( len ( iIiIi1iI11iiI ) < len ( O00o0Oo ) ) : return ( False )
return ( iIiIi1iI11iiI . find ( O00o0Oo ) == 0 )
if ( self . mask_len < Ooo ) : return ( False )
if 29 - 29: I11i
iIiiI1i11Ii = ( prefix . addr_length ( ) * 8 ) - Ooo
iii1iiiiiiI1 = ( 2 ** Ooo - 1 ) << iIiiI1i11Ii
return ( ( self . address & iii1iiiiiiI1 ) == prefix . address )
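#
# is_more_specific() above tests prefix containment. For binary AFIs it
# masks this address with the candidate prefix's mask and compares: for
# an IPv4 /24 the mask is (2**24 - 1) << 8 == 0xffffff00, so 10.1.1.1
# is more specific than 10.1.1.0/24. Distinguished-names and
# geo-prefixes fall back to a leading-substring comparison.
#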
if 12 - 12: oO0o % i1IIi - oO0o / ooOoO0o * II111iiii % ooOoO0o
if 6 - 6: IiII / OoO0O00
def mask_address ( self , mask_len ) :
iIiiI1i11Ii = ( self . addr_length ( ) * 8 ) - mask_len
iii1iiiiiiI1 = ( 2 ** mask_len - 1 ) << iIiiI1i11Ii
self . address &= iii1iiiiiiI1
if 83 - 83: IiII - iIii1I11I1II1 * ooOoO0o - oO0o
if 77 - 77: Ii1I
def is_exact_match ( self , prefix ) :
if ( self . instance_id != prefix . instance_id ) : return ( False )
i1Ii = self . print_prefix ( )
Oo00OOoO = prefix . print_prefix ( ) if prefix else ""
return ( i1Ii == Oo00OOoO )
if 20 - 20: I1Ii111
if 33 - 33: i11iIiiIii / I1Ii111 + IiII / II111iiii + I11i
def is_local ( self ) :
if ( self . is_ipv4 ( ) ) :
IiI1iIi1I1i = lisp_myrlocs [ 0 ]
if ( IiI1iIi1I1i == None ) : return ( False )
IiI1iIi1I1i = IiI1iIi1I1i . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == IiI1iIi1I1i )
if 44 - 44: OoOoOO00 / OoooooooOO % O0 * Ii1I * IiII
if ( self . is_ipv6 ( ) ) :
IiI1iIi1I1i = lisp_myrlocs [ 1 ]
if ( IiI1iIi1I1i == None ) : return ( False )
IiI1iIi1I1i = IiI1iIi1I1i . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == IiI1iIi1I1i )
if 84 - 84: o0oOOo0O0Ooo * IiII * OOooOOo * iII111i
return ( False )
if 56 - 56: iII111i * II111iiii . OoooooooOO . I11i
if 25 - 25: ooOoO0o % o0oOOo0O0Ooo - i11iIiiIii
def store_iid_range ( self , iid , mask_len ) :
if ( self . afi == LISP_AFI_NONE ) :
if ( iid == 0 and mask_len == 0 ) : self . afi = LISP_AFI_ULTIMATE_ROOT
else : self . afi = LISP_AFI_IID_RANGE
if 79 - 79: iII111i - I1IiiI % O0 / Oo0Ooo + OoOoOO00 . Oo0Ooo
self . instance_id = iid
self . mask_len = mask_len
if 59 - 59: I1ii11iIi11i * OoOoOO00 / Ii1I
if 80 - 80: IiII - ooOoO0o / OoOoOO00 / I11i * O0 + oO0o
def lcaf_length ( self , lcaf_type ) :
OOOOO000oo0 = self . addr_length ( ) + 2
if ( lcaf_type == LISP_LCAF_AFI_LIST_TYPE ) : OOOOO000oo0 += 4
if ( lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE ) : OOOOO000oo0 += 4
if ( lcaf_type == LISP_LCAF_ASN_TYPE ) : OOOOO000oo0 += 4
if ( lcaf_type == LISP_LCAF_APP_DATA_TYPE ) : OOOOO000oo0 += 8
if ( lcaf_type == LISP_LCAF_GEO_COORD_TYPE ) : OOOOO000oo0 += 12
if ( lcaf_type == LISP_LCAF_OPAQUE_TYPE ) : OOOOO000oo0 += 0
if ( lcaf_type == LISP_LCAF_NAT_TYPE ) : OOOOO000oo0 += 4
if ( lcaf_type == LISP_LCAF_NONCE_LOC_TYPE ) : OOOOO000oo0 += 4
if ( lcaf_type == LISP_LCAF_MCAST_INFO_TYPE ) : OOOOO000oo0 = OOOOO000oo0 * 2 + 8
if ( lcaf_type == LISP_LCAF_ELP_TYPE ) : OOOOO000oo0 += 0
if ( lcaf_type == LISP_LCAF_SECURITY_TYPE ) : OOOOO000oo0 += 6
if ( lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE ) : OOOOO000oo0 += 4
if ( lcaf_type == LISP_LCAF_RLE_TYPE ) : OOOOO000oo0 += 4
return ( OOOOO000oo0 )
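#
# lcaf_length() above computes the LCAF "Length" field for this
# address: the AFI-encoded address length plus 2 bytes for the AFI,
# plus a fixed per-type overhead (for example +4 for Instance-ID, +8
# for Application-Data, +12 for Geo-Coordinates). The Multicast-Info
# case doubles the address portion because the LCAF carries both a
# source and a group address.
#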
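#
# The methods below encode and decode the Instance-ID LCAF wrapper used
# for EIDs: a 6-byte LCAF header, a 4-byte instance-id, a 2-byte AFI,
# then the AFI-encoded address. On decode, an AFI of 0 with a non-zero
# mask-length is treated as an IID range and an AFI of 0 with
# mask-length 0 as the ultimate root.
#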
def lcaf_encode_iid ( self ) :
o0O00o0o = LISP_LCAF_INSTANCE_ID_TYPE
ii11ii11II = socket . htons ( self . lcaf_length ( o0O00o0o ) )
II1 = self . instance_id
o0o0O00oOo = self . afi
IIiiiII = 0
if ( o0o0O00oOo < 0 ) :
if ( self . afi == LISP_AFI_GEO_COORD ) :
o0o0O00oOo = LISP_AFI_LCAF
IIiiiII = 0
else :
o0o0O00oOo = 0
IIiiiII = self . mask_len
if 63 - 63: Oo0Ooo / oO0o + iII111i % OoooooooOO * I11i
if 34 - 34: I1IiiI + I1Ii111 % ooOoO0o
if 24 - 24: Ii1I % II111iiii - i11iIiiIii
o00O000 = struct . pack ( "BBBBH" , 0 , 0 , o0O00o0o , IIiiiII , ii11ii11II )
o00O000 += struct . pack ( "IH" , socket . htonl ( II1 ) , socket . htons ( o0o0O00oOo ) )
if ( o0o0O00oOo == 0 ) : return ( o00O000 )
if 21 - 21: oO0o . OoOoOO00 - iIii1I11I1II1 + OOooOOo * I11i . i1IIi
if ( self . afi == LISP_AFI_GEO_COORD ) :
o00O000 = o00O000 [ 0 : - 2 ]
o00O000 += self . address . encode_geo ( )
return ( o00O000 )
if 59 - 59: I1ii11iIi11i / i11iIiiIii / iII111i + OoO0O00
if 56 - 56: OOooOOo * i11iIiiIii - i11iIiiIii * I1IiiI + iII111i . OoOoOO00
o00O000 += self . pack_address ( )
return ( o00O000 )
if 49 - 49: I1ii11iIi11i % oO0o - I1Ii111 . I1ii11iIi11i % II111iiii
if 20 - 20: I1ii11iIi11i . iIii1I11I1II1 - Ii1I % OoO0O00
def lcaf_decode_iid ( self , packet ) :
IIiI1I11ii1i = "BBBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 27 - 27: iIii1I11I1II1 / I1Ii111 - I11i . OoO0O00 + ooOoO0o
IiiIii1111Ii1I1 , iIIIIi , o0O00o0o , ooO000000O , OOOOO000oo0 = struct . unpack ( IIiI1I11ii1i ,
packet [ : i1II1i1iiI1 ] )
packet = packet [ i1II1i1iiI1 : : ]
if 36 - 36: oO0o - I1Ii111
if ( o0O00o0o != LISP_LCAF_INSTANCE_ID_TYPE ) : return ( None )
if 55 - 55: oO0o
IIiI1I11ii1i = "IH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 10 - 10: I1IiiI
II1 , o0o0O00oOo = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
packet = packet [ i1II1i1iiI1 : : ]
if 17 - 17: i11iIiiIii % o0oOOo0O0Ooo . ooOoO0o
OOOOO000oo0 = socket . ntohs ( OOOOO000oo0 )
self . instance_id = socket . ntohl ( II1 )
o0o0O00oOo = socket . ntohs ( o0o0O00oOo )
self . afi = o0o0O00oOo
if ( ooO000000O != 0 and o0o0O00oOo == 0 ) : self . mask_len = ooO000000O
if ( o0o0O00oOo == 0 ) :
self . afi = LISP_AFI_IID_RANGE if ooO000000O else LISP_AFI_ULTIMATE_ROOT
if ( o0o0O00oOo == 0 ) : return ( packet )
if ( self . is_dist_name ( ) ) :
packet , self . address = lisp_decode_dist_name ( packet )
self . mask_len = len ( self . address ) * 8
return ( packet )
if ( o0o0O00oOo == LISP_AFI_LCAF ) :
IIiI1I11ii1i = "BBBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 43 - 43: OOooOOo % I1Ii111 . IiII % OoO0O00 + I1Ii111 % OoooooooOO
OoO0oOoo , I11I , o0O00o0o , ii11iIII111 , i11iii11 = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 17 - 17: OoooooooOO - i1IIi * I11i
if 33 - 33: i1IIi . Oo0Ooo + I11i
if ( o0O00o0o != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
if 97 - 97: OOooOOo / IiII / ooOoO0o / OoooooooOO
i11iii11 = socket . ntohs ( i11iii11 )
packet = packet [ i1II1i1iiI1 : : ]
if ( i11iii11 > len ( packet ) ) : return ( None )
if 78 - 78: I1Ii111 + I1Ii111
O0OOoo = lisp_geo ( "" )
self . afi = LISP_AFI_GEO_COORD
self . address = O0OOoo
packet = O0OOoo . decode_geo ( packet , i11iii11 , ii11iIII111 )
self . mask_len = self . host_mask_len ( )
return ( packet )
if 43 - 43: I1Ii111 * o0oOOo0O0Ooo + i1IIi
if 19 - 19: Ii1I
ii11ii11II = self . addr_length ( )
if ( len ( packet ) < ii11ii11II ) : return ( None )
if 51 - 51: oO0o
packet = self . unpack_address ( packet )
return ( packet )
def lcaf_encode_sg ( self , group ) :
o0O00o0o = LISP_LCAF_MCAST_INFO_TYPE
II1 = socket . htonl ( self . instance_id )
ii11ii11II = socket . htons ( self . lcaf_length ( o0O00o0o ) )
o00O000 = struct . pack ( "BBBBHIHBB" , 0 , 0 , o0O00o0o , 0 , ii11ii11II , II1 ,
0 , self . mask_len , group . mask_len )
if 34 - 34: iIii1I11I1II1
o00O000 += struct . pack ( "H" , socket . htons ( self . afi ) )
o00O000 += self . pack_address ( )
o00O000 += struct . pack ( "H" , socket . htons ( group . afi ) )
o00O000 += group . pack_address ( )
return ( o00O000 )
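#
# lcaf_encode_sg() above and lcaf_decode_sg() below carry an (S,G)
# multicast entry in a Multicast-Info LCAF: the header holds the
# instance-id plus the source and group mask-lengths, followed by the
# AFI-encoded source address and then the AFI-encoded group address.
# lcaf_decode_sg() returns [remaining-packet, group-address], or
# [None, None] when the packet is malformed.
#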
if 65 - 65: II111iiii - iII111i / o0oOOo0O0Ooo
if 35 - 35: i11iIiiIii - Oo0Ooo . I1ii11iIi11i % OoOoOO00
def lcaf_decode_sg ( self , packet ) :
IIiI1I11ii1i = "BBBBHIHBB"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( [ None , None ] )
if 20 - 20: OoO0O00
IiiIii1111Ii1I1 , iIIIIi , o0O00o0o , I1IiII , OOOOO000oo0 , II1 , o0OOOO , iI1iiIii1Ii , iiI1ii1i = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 66 - 66: o0oOOo0O0Ooo - Oo0Ooo . I1IiiI / I11i * OoooooooOO % i1IIi
packet = packet [ i1II1i1iiI1 : : ]
if 1 - 1: OoOoOO00 * O0 + i11iIiiIii . ooOoO0o / OoO0O00
if ( o0O00o0o != LISP_LCAF_MCAST_INFO_TYPE ) : return ( [ None , None ] )
if 48 - 48: o0oOOo0O0Ooo * II111iiii
self . instance_id = socket . ntohl ( II1 )
OOOOO000oo0 = socket . ntohs ( OOOOO000oo0 ) - 8
IIiI1I11ii1i = "H"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( [ None , None ] )
if ( OOOOO000oo0 < i1II1i1iiI1 ) : return ( [ None , None ] )
if 15 - 15: o0oOOo0O0Ooo + OoooooooOO - OOooOOo - o0oOOo0O0Ooo . iIii1I11I1II1 / Ii1I
o0o0O00oOo = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
OOOOO000oo0 -= i1II1i1iiI1
self . afi = socket . ntohs ( o0o0O00oOo )
self . mask_len = iI1iiIii1Ii
ii11ii11II = self . addr_length ( )
if ( OOOOO000oo0 < ii11ii11II ) : return ( [ None , None ] )
if 33 - 33: OoO0O00
packet = self . unpack_address ( packet )
if ( packet == None ) : return ( [ None , None ] )
if 91 - 91: I11i % I11i % iII111i
OOOOO000oo0 -= ii11ii11II
IIiI1I11ii1i = "H"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( [ None , None ] )
if ( OOOOO000oo0 < i1II1i1iiI1 ) : return ( [ None , None ] )
if 42 - 42: oO0o % OoOoOO00 - oO0o + I11i / i11iIiiIii
o0o0O00oOo = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
OOOOO000oo0 -= i1II1i1iiI1
i1i11Ii1 = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
i1i11Ii1 . afi = socket . ntohs ( o0o0O00oOo )
i1i11Ii1 . mask_len = iiI1ii1i
i1i11Ii1 . instance_id = self . instance_id
ii11ii11II = self . addr_length ( )
if ( OOOOO000oo0 < ii11ii11II ) : return ( [ None , None ] )
if 74 - 74: OoO0O00 - II111iiii - ooOoO0o % i1IIi
packet = i1i11Ii1 . unpack_address ( packet )
if ( packet == None ) : return ( [ None , None ] )
if 42 - 42: i11iIiiIii / O0
return ( [ packet , i1i11Ii1 ] )
if 8 - 8: I1Ii111
if 51 - 51: i11iIiiIii
def lcaf_decode_eid ( self , packet ) :
IIiI1I11ii1i = "BBB"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( [ None , None ] )
I1IiII , I11I , o0O00o0o = struct . unpack ( IIiI1I11ii1i ,
packet [ : i1II1i1iiI1 ] )
if 33 - 33: oO0o - OoOoOO00 - i11iIiiIii + I1Ii111 + iIii1I11I1II1
if ( o0O00o0o == LISP_LCAF_INSTANCE_ID_TYPE ) :
return ( [ self . lcaf_decode_iid ( packet ) , None ] )
elif ( o0O00o0o == LISP_LCAF_MCAST_INFO_TYPE ) :
packet , i1i11Ii1 = self . lcaf_decode_sg ( packet )
return ( [ packet , i1i11Ii1 ] )
elif ( o0O00o0o == LISP_LCAF_GEO_COORD_TYPE ) :
IIiI1I11ii1i = "BBBBH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( None )
if 2 - 2: OoooooooOO + IiII / iII111i . iIii1I11I1II1 * OoOoOO00
OoO0oOoo , I11I , o0O00o0o , ii11iIII111 , i11iii11 = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 84 - 84: OOooOOo
if 68 - 68: I1Ii111
if ( o0O00o0o != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
if 92 - 92: oO0o * Ii1I / OoO0O00 % II111iiii
i11iii11 = socket . ntohs ( i11iii11 )
packet = packet [ i1II1i1iiI1 : : ]
if ( i11iii11 > len ( packet ) ) : return ( None )
if 54 - 54: oO0o + I11i - OoO0O00
O0OOoo = lisp_geo ( "" )
self . instance_id = 0
self . afi = LISP_AFI_GEO_COORD
self . address = O0OOoo
packet = O0OOoo . decode_geo ( packet , i11iii11 , ii11iIII111 )
self . mask_len = self . host_mask_len ( )
if 86 - 86: OoooooooOO
return ( [ packet , None ] )
class lisp_elp_node ( ) :
def __init__ ( self ) :
self . address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . probe = False
self . strict = False
self . eid = False
self . we_are_last = False
if 61 - 61: ooOoO0o / ooOoO0o
if 51 - 51: iIii1I11I1II1 / oO0o * I1Ii111 + i1IIi
def copy_elp_node ( self ) :
IIi1IiIii1 = lisp_elp_node ( )
IIi1IiIii1 . copy_address ( self . address )
IIi1IiIii1 . probe = self . probe
IIi1IiIii1 . strict = self . strict
IIi1IiIii1 . eid = self . eid
IIi1IiIii1 . we_are_last = self . we_are_last
return ( IIi1IiIii1 )
if 96 - 96: Oo0Ooo + oO0o - Oo0Ooo - OoOoOO00 % OOooOOo . iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 % OoooooooOO
if 6 - 6: II111iiii / oO0o - OOooOOo . O0 - o0oOOo0O0Ooo
class lisp_elp ( ) :
def __init__ ( self , name ) :
self . elp_name = name
self . elp_nodes = [ ]
self . use_elp_node = None
self . we_are_last = False
if 72 - 72: iIii1I11I1II1 / OoooooooOO * ooOoO0o / ooOoO0o % O0 + IiII
if 96 - 96: iII111i / i11iIiiIii + Oo0Ooo . I1IiiI + iII111i % OoOoOO00
def copy_elp ( self ) :
O0Oooo0 = lisp_elp ( self . elp_name )
O0Oooo0 . use_elp_node = self . use_elp_node
O0Oooo0 . we_are_last = self . we_are_last
for IIi1IiIii1 in self . elp_nodes :
O0Oooo0 . elp_nodes . append ( IIi1IiIii1 . copy_elp_node ( ) )
if 19 - 19: i11iIiiIii . Oo0Ooo . OoOoOO00 - I1IiiI
return ( O0Oooo0 )
if 85 - 85: I11i - OoO0O00 % iIii1I11I1II1 . iII111i + ooOoO0o . Oo0Ooo
if 87 - 87: iII111i
def print_elp ( self , want_marker ) :
iiiII1i11iII = ""
for IIi1IiIii1 in self . elp_nodes :
o000oOoO = ""
if ( want_marker ) :
if ( IIi1IiIii1 == self . use_elp_node ) :
o000oOoO = "*"
elif ( IIi1IiIii1 . we_are_last ) :
o000oOoO = "x"
if 24 - 24: ooOoO0o / OoooooooOO % I1ii11iIi11i * ooOoO0o
if 14 - 14: I1ii11iIi11i + OoO0O00 - I1IiiI - Oo0Ooo
iiiII1i11iII += "{}{}({}{}{}), " . format ( o000oOoO ,
IIi1IiIii1 . address . print_address_no_iid ( ) ,
"r" if IIi1IiIii1 . eid else "R" , "P" if IIi1IiIii1 . probe else "p" ,
"S" if IIi1IiIii1 . strict else "s" )
if 44 - 44: II111iiii / I1ii11iIi11i
return ( iiiII1i11iII [ 0 : - 2 ] if iiiII1i11iII != "" else "" )
if 39 - 39: OoooooooOO % OoO0O00
if 83 - 83: OOooOOo % I1IiiI + O0 % OoooooooOO
def select_elp_node ( self ) :
O00OO0ooo , o0oOO000 , oO00O = lisp_myrlocs
oo0OOo0O = None
if 91 - 91: I1Ii111 * iII111i * OoO0O00
for IIi1IiIii1 in self . elp_nodes :
if ( O00OO0ooo and IIi1IiIii1 . address . is_exact_match ( O00OO0ooo ) ) :
oo0OOo0O = self . elp_nodes . index ( IIi1IiIii1 )
break
if 79 - 79: iII111i + oO0o
if ( o0oOO000 and IIi1IiIii1 . address . is_exact_match ( o0oOO000 ) ) :
oo0OOo0O = self . elp_nodes . index ( IIi1IiIii1 )
break
if ( oo0OOo0O == None ) :
self . use_elp_node = self . elp_nodes [ 0 ]
IIi1IiIii1 . we_are_last = False
return
if ( self . elp_nodes [ - 1 ] == self . elp_nodes [ oo0OOo0O ] ) :
self . use_elp_node = None
IIi1IiIii1 . we_are_last = True
return
self . use_elp_node = self . elp_nodes [ oo0OOo0O + 1 ]
return
class lisp_geo ( ) :
def __init__ ( self , name ) :
self . geo_name = name
self . latitude = 0xffffffff
self . lat_mins = 0
self . lat_secs = 0
self . longitude = 0xffffffff
self . long_mins = 0
self . long_secs = 0
self . altitude = - 1
self . radius = 0
if 61 - 61: OoooooooOO + O0 - i1IIi % oO0o / I1ii11iIi11i
if 50 - 50: oO0o + II111iiii * OoOoOO00 % OoO0O00 . II111iiii % o0oOOo0O0Ooo
def copy_geo ( self ) :
O0OOoo = lisp_geo ( self . geo_name )
O0OOoo . latitude = self . latitude
O0OOoo . lat_mins = self . lat_mins
O0OOoo . lat_secs = self . lat_secs
O0OOoo . longitude = self . longitude
O0OOoo . long_mins = self . long_mins
O0OOoo . long_secs = self . long_secs
O0OOoo . altitude = self . altitude
O0OOoo . radius = self . radius
return ( O0OOoo )
if 32 - 32: i1IIi / Ii1I + i11iIiiIii % oO0o
if 11 - 11: Ii1I - ooOoO0o % i11iIiiIii / OoooooooOO - O0 - IiII
def no_geo_altitude ( self ) :
return ( self . altitude == - 1 )
if 25 - 25: IiII + O0 + oO0o % iIii1I11I1II1 - II111iiii . I1IiiI
if 62 - 62: IiII . O0 + oO0o - ooOoO0o * iIii1I11I1II1
def parse_geo_string ( self , geo_str ) :
oo0OOo0O = geo_str . find ( "]" )
if ( oo0OOo0O != - 1 ) : geo_str = geo_str [ oo0OOo0O + 1 : : ]
if ( geo_str . find ( "/" ) != - 1 ) :
geo_str , i1IiiiIiii = geo_str . split ( "/" )
self . radius = int ( i1IiiiIiii )
if 81 - 81: ooOoO0o . Oo0Ooo . OoOoOO00 + OOooOOo % iII111i - oO0o
if 68 - 68: iII111i - O0 / Ii1I
geo_str = geo_str . split ( "-" )
if ( len ( geo_str ) < 8 ) : return ( False )
if 15 - 15: I1Ii111 / I1ii11iIi11i / I1IiiI % i11iIiiIii + II111iiii . ooOoO0o
oo00OOo0 = geo_str [ 0 : 4 ]
I1iiOO00o00oOoOo = geo_str [ 4 : 8 ]
if ( len ( geo_str ) > 8 ) : self . altitude = int ( geo_str [ 8 ] )
self . latitude = int ( oo00OOo0 [ 0 ] )
self . lat_mins = int ( oo00OOo0 [ 1 ] )
self . lat_secs = int ( oo00OOo0 [ 2 ] )
if ( oo00OOo0 [ 3 ] == "N" ) : self . latitude = - self . latitude
self . longitude = int ( I1iiOO00o00oOoOo [ 0 ] )
self . long_mins = int ( I1iiOO00o00oOoOo [ 1 ] )
self . long_secs = int ( I1iiOO00o00oOoOo [ 2 ] )
if ( I1iiOO00o00oOoOo [ 3 ] == "E" ) : self . longitude = - self . longitude
return ( True )
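#
# parse_geo_string() above parses the same
# "deg-min-sec-[N|S]-deg-min-sec-[W|E][-altitude][/radius]" syntax that
# lisp_address.is_geo_string() validates. Note the sign convention used
# throughout this class: northern latitudes and eastern longitudes are
# stored as negative values, which print_geo() reverses when printing.
#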
if 72 - 72: oO0o % I11i % OOooOOo * iIii1I11I1II1 - OOooOOo % O0
if 84 - 84: oO0o - o0oOOo0O0Ooo / II111iiii . o0oOOo0O0Ooo
def print_geo ( self ) :
ooOo = "N" if self . latitude < 0 else "S"
I11II1i1i = "E" if self . longitude < 0 else "W"
if 14 - 14: OoooooooOO
Ii1i11iIi1iII = "{}-{}-{}-{}-{}-{}-{}-{}" . format ( abs ( self . latitude ) ,
self . lat_mins , self . lat_secs , ooOo , abs ( self . longitude ) ,
self . long_mins , self . long_secs , I11II1i1i )
if 44 - 44: I11i * I11i + OoooooooOO
if ( self . no_geo_altitude ( ) == False ) :
Ii1i11iIi1iII += "-" + str ( self . altitude )
if ( self . radius != 0 ) : Ii1i11iIi1iII += "/{}" . format ( self . radius )
return ( Ii1i11iIi1iII )
if 37 - 37: OOooOOo % OoOoOO00 - II111iiii * o0oOOo0O0Ooo . I1IiiI . OoOoOO00
if 92 - 92: I11i + OoO0O00 . OoooooooOO
def geo_url ( self ) :
iIiIi1iIIii = os . getenv ( "LISP_GEO_ZOOM_LEVEL" )
iIiIi1iIIii = "10" if ( iIiIi1iIIii == "" or iIiIi1iIIii . isdigit ( ) == False ) else iIiIi1iIIii
I1iiIii11Ii , IIo0 = self . dms_to_decimal ( )
I1OOoO = ( "http://maps.googleapis.com/maps/api/staticmap?center={},{}" + "&markers=color:blue%7Clabel:lisp%7C{},{}" + "&zoom={}&size=1024x1024&sensor=false" ) . format ( I1iiIii11Ii , IIo0 , I1iiIii11Ii , IIo0 ,
# OoOoOO00 % Ii1I
# iII111i
iIiIi1iIIii )
return ( I1OOoO )
if 59 - 59: i11iIiiIii . OOooOOo
if 17 - 17: Ii1I - iII111i * I1ii11iIi11i
def print_geo_url ( self ) :
O0OOoo = self . print_geo ( )
if ( self . radius == 0 ) :
I1OOoO = self . geo_url ( )
O0I11IIIII = "<a href='{}'>{}</a>" . format ( I1OOoO , O0OOoo )
else :
I1OOoO = O0OOoo . replace ( "/" , "-" )
O0I11IIIII = "<a href='/lisp/geo-map/{}'>{}</a>" . format ( I1OOoO , O0OOoo )
if 79 - 79: i1IIi * OOooOOo % II111iiii % OoO0O00 / i11iIiiIii
return ( O0I11IIIII )
if 18 - 18: i11iIiiIii . oO0o
if 48 - 48: i1IIi
def dms_to_decimal ( self ) :
oO0oO0OO0 , IiI1i1II , I11iii1i1 = self . latitude , self . lat_mins , self . lat_secs
o00oo = float ( abs ( oO0oO0OO0 ) )
o00oo += float ( IiI1i1II * 60 + I11iii1i1 ) / 3600
if ( oO0oO0OO0 > 0 ) : o00oo = - o00oo
IiiiIIIi1 = o00oo
if 70 - 70: OOooOOo * OOooOOo - O0 - I1Ii111 / OoooooooOO - iII111i
oO0oO0OO0 , IiI1i1II , I11iii1i1 = self . longitude , self . long_mins , self . long_secs
o00oo = float ( abs ( oO0oO0OO0 ) )
o00oo += float ( IiI1i1II * 60 + I11iii1i1 ) / 3600
if ( oO0oO0OO0 > 0 ) : o00oo = - o00oo
O00O00oOO0Oo = o00oo
return ( ( IiiiIIIi1 , O00O00oOO0Oo ) )
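#
# dms_to_decimal() above converts the stored degrees/minutes/seconds to
# signed decimal degrees for the vincenty() distance computation in
# get_distance(), e.g. 37 deg 25 min 19 sec becomes
# 37 + (25*60 + 19)/3600.0 = 37.4219 (negated when the stored value is
# positive, per the sign convention noted above).
#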
if 99 - 99: oO0o . i11iIiiIii % i1IIi + iII111i
if 91 - 91: I1Ii111 . II111iiii / Ii1I * O0
def get_distance ( self , geo_point ) :
IIIi11I1IiiIi = self . dms_to_decimal ( )
o00OOo0oo0oO = geo_point . dms_to_decimal ( )
iIiiii = vincenty ( IIIi11I1IiiIi , o00OOo0oo0oO )
return ( iIiiii . km )
if 46 - 46: I11i * OOooOOo
if 57 - 57: iIii1I11I1II1
def point_in_circle ( self , geo_point ) :
Iiiii11iI = self . get_distance ( geo_point )
return ( Iiiii11iI <= self . radius )
if 61 - 61: I1ii11iIi11i . OOooOOo - O0 * OoOoOO00
if 12 - 12: I1ii11iIi11i / I1Ii111
def encode_geo ( self ) :
ooOO0o0ooOo0 = socket . htons ( LISP_AFI_LCAF )
I1i11I = socket . htons ( 20 + 2 )
I11I = 0
if 5 - 5: Oo0Ooo / o0oOOo0O0Ooo % i11iIiiIii - ooOoO0o
I1iiIii11Ii = abs ( self . latitude )
o0iIII1i11i = ( ( self . lat_mins * 60 ) + self . lat_secs ) * 1000
if ( self . latitude < 0 ) : I11I |= 0x40
if 48 - 48: Ii1I / Ii1I / i1IIi * I1IiiI . iII111i + I1ii11iIi11i
IIo0 = abs ( self . longitude )
ooiIIi11I1 = ( ( self . long_mins * 60 ) + self . long_secs ) * 1000
if ( self . longitude < 0 ) : I11I |= 0x20
if 3 - 3: OoOoOO00 / Oo0Ooo - Oo0Ooo
OO0I11iI = 0
if ( self . no_geo_altitude ( ) == False ) :
OO0I11iI = socket . htonl ( self . altitude )
I11I |= 0x10
if 52 - 52: OoO0O00 % I11i - oO0o . I11i % IiII
i1IiiiIiii = socket . htons ( self . radius )
if ( i1IiiiIiii != 0 ) : I11I |= 0x06
if 100 - 100: OoooooooOO % OoOoOO00 . i1IIi - Ii1I + iIii1I11I1II1
ooOOoO0Oo0OoO = struct . pack ( "HBBBBH" , ooOO0o0ooOo0 , 0 , 0 , LISP_LCAF_GEO_COORD_TYPE ,
0 , I1i11I )
ooOOoO0Oo0OoO += struct . pack ( "BBHBBHBBHIHHH" , I11I , 0 , 0 , I1iiIii11Ii , o0iIII1i11i >> 16 ,
socket . htons ( o0iIII1i11i & 0x0ffff ) , IIo0 , ooiIIi11I1 >> 16 ,
socket . htons ( ooiIIi11I1 & 0xffff ) , OO0I11iI , i1IiiiIiii , 0 , 0 )
if 10 - 10: ooOoO0o
return ( ooOOoO0Oo0OoO )
if 86 - 86: OoOoOO00 / Ii1I
if 80 - 80: II111iiii
def decode_geo ( self , packet , lcaf_len , radius_hi ) :
IIiI1I11ii1i = "BBHBBHBBHIHHH"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( lcaf_len < i1II1i1iiI1 ) : return ( None )
if 66 - 66: ooOoO0o
I11I , OoOoO , I1IIiI1i , I1iiIii11Ii , Oo0OooOoO , o0iIII1i11i , IIo0 , I1Ii1I , ooiIIi11I1 , OO0I11iI , i1IiiiIiii , O0000OO0O0 , o0o0O00oOo = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] )
if 56 - 56: iIii1I11I1II1
if 37 - 37: OoOoOO00
if 56 - 56: OOooOOo / I11i - i11iIiiIii
if 11 - 11: iIii1I11I1II1
o0o0O00oOo = socket . ntohs ( o0o0O00oOo )
if ( o0o0O00oOo == LISP_AFI_LCAF ) : return ( None )
if 12 - 12: i1IIi + oO0o * I1Ii111 + OoOoOO00 . oO0o
if ( I11I & 0x40 ) : I1iiIii11Ii = - I1iiIii11Ii
self . latitude = I1iiIii11Ii
II1i1I = ( ( Oo0OooOoO << 16 ) | socket . ntohs ( o0iIII1i11i ) ) / 1000
self . lat_mins = II1i1I / 60
self . lat_secs = II1i1I % 60
if 19 - 19: iIii1I11I1II1 / iII111i + OOooOOo . ooOoO0o
if ( I11I & 0x20 ) : IIo0 = - IIo0
self . longitude = IIo0
o0oO = ( ( I1Ii1I << 16 ) | socket . ntohs ( ooiIIi11I1 ) ) / 1000
self . long_mins = o0oO / 60
self . long_secs = o0oO % 60
if 6 - 6: IiII
self . altitude = socket . ntohl ( OO0I11iI ) if ( I11I & 0x10 ) else - 1
i1IiiiIiii = socket . ntohs ( i1IiiiIiii )
self . radius = i1IiiiIiii if ( I11I & 0x02 ) else i1IiiiIiii * 1000
if 69 - 69: iII111i
self . geo_name = None
packet = packet [ i1II1i1iiI1 : : ]
if 87 - 87: i11iIiiIii % o0oOOo0O0Ooo + Ii1I
if ( o0o0O00oOo != 0 ) :
self . rloc . afi = o0o0O00oOo
packet = self . rloc . unpack_address ( packet )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if 72 - 72: Ii1I / II111iiii + o0oOOo0O0Ooo
return ( packet )
class lisp_rle_node ( ) :
def __init__ ( self ) :
self . address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . level = 0
self . translated_port = 0
self . rloc_name = None
if 48 - 48: I1ii11iIi11i + OoooooooOO % i1IIi
if 46 - 46: OoOoOO00
def copy_rle_node ( self ) :
I1I1iiI = lisp_rle_node ( )
I1I1iiI . address . copy_address ( self . address )
I1I1iiI . level = self . level
I1I1iiI . translated_port = self . translated_port
I1I1iiI . rloc_name = self . rloc_name
return ( I1I1iiI )
if 75 - 75: I1IiiI
if 37 - 37: iIii1I11I1II1 % OoO0O00 * ooOoO0o + I11i % ooOoO0o / i11iIiiIii
def store_translated_rloc ( self , rloc , port ) :
self . address . copy_address ( rloc )
self . translated_port = port
if 14 - 14: i1IIi / ooOoO0o
if 10 - 10: ooOoO0o / OoooooooOO - ooOoO0o % O0 + oO0o - oO0o
def get_encap_keys ( self ) :
Iiiii = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 16 - 16: O0
ooOOo0o = self . address . print_address_no_iid ( ) + ":" + Iiiii
if 14 - 14: Ii1I . Ii1I . OOooOOo - O0 / OoO0O00 % II111iiii
try :
i1iIi = lisp_crypto_keys_by_rloc_encap [ ooOOo0o ]
if ( i1iIi [ 1 ] ) : return ( i1iIi [ 1 ] . encrypt_key , i1iIi [ 1 ] . icv_key )
return ( None , None )
except :
return ( None , None )
if 5 - 5: iIii1I11I1II1 % OoOoOO00 % OOooOOo % O0 * oO0o . iIii1I11I1II1
if 96 - 96: i11iIiiIii + oO0o / I1ii11iIi11i . IiII % o0oOOo0O0Ooo
if 41 - 41: o0oOOo0O0Ooo . i1IIi - OOooOOo
if 19 - 19: o0oOOo0O0Ooo % I1Ii111 % I11i
class lisp_rle ( ) :
def __init__ ( self , name ) :
self . rle_name = name
self . rle_nodes = [ ]
self . rle_forwarding_list = [ ]
if 1 - 1: I1IiiI / o0oOOo0O0Ooo - I1Ii111
if 50 - 50: I11i - OoOoOO00 + I1IiiI % Oo0Ooo / OoooooooOO - I1ii11iIi11i
def copy_rle ( self ) :
II1IIiiI1 = lisp_rle ( self . rle_name )
for I1I1iiI in self . rle_nodes :
II1IIiiI1 . rle_nodes . append ( I1I1iiI . copy_rle_node ( ) )
if 26 - 26: IiII . Ii1I
II1IIiiI1 . build_forwarding_list ( )
return ( II1IIiiI1 )
if 35 - 35: I1ii11iIi11i + OOooOOo
if 88 - 88: O0
def print_rle ( self , html ) :
oOOoo0O00 = ""
for I1I1iiI in self . rle_nodes :
Iiiii = I1I1iiI . translated_port
II1iiiIiiI = blue ( I1I1iiI . rloc_name , html ) if I1I1iiI . rloc_name != None else ""
if 29 - 29: Ii1I % o0oOOo0O0Ooo - Ii1I
ooOOo0o = I1I1iiI . address . print_address_no_iid ( )
if ( I1I1iiI . address . is_local ( ) ) : ooOOo0o = red ( ooOOo0o , html )
oOOoo0O00 += "{}{}(L{}){}, " . format ( ooOOo0o , "" if Iiiii == 0 else "-" + str ( Iiiii ) , I1I1iiI . level ,
# IiII - ooOoO0o / O0
"" if I1I1iiI . rloc_name == None else II1iiiIiiI )
if 27 - 27: Oo0Ooo
return ( oOOoo0O00 [ 0 : - 2 ] if oOOoo0O00 != "" else "" )
if 15 - 15: iIii1I11I1II1 . OoOoOO00 % Ii1I / i1IIi . o0oOOo0O0Ooo
if 45 - 45: iIii1I11I1II1 - i1IIi % I1IiiI - I1Ii111 + oO0o
def build_forwarding_list ( self ) :
IiIi1II1Ii = - 1
for I1I1iiI in self . rle_nodes :
if ( IiIi1II1Ii == - 1 ) :
if ( I1I1iiI . address . is_local ( ) ) : IiIi1II1Ii = I1I1iiI . level
else :
if ( I1I1iiI . level > IiIi1II1Ii ) : break
if 15 - 15: iIii1I11I1II1 - OoooooooOO / ooOoO0o
if 83 - 83: IiII + I1Ii111 / OoOoOO00 * IiII . oO0o
IiIi1II1Ii = 0 if IiIi1II1Ii == - 1 else I1I1iiI . level
if 22 - 22: O0 + ooOoO0o + I1Ii111
self . rle_forwarding_list = [ ]
for I1I1iiI in self . rle_nodes :
if ( I1I1iiI . level == IiIi1II1Ii or ( IiIi1II1Ii == 0 and
I1I1iiI . level == 128 ) ) :
if ( lisp_i_am_rtr == False and I1I1iiI . address . is_local ( ) ) :
ooOOo0o = I1I1iiI . address . print_address_no_iid ( )
lprint ( "Exclude local RLE RLOC {}" . format ( ooOOo0o ) )
continue
if 57 - 57: OOooOOo . ooOoO0o - OoooooooOO - I1ii11iIi11i * O0
self . rle_forwarding_list . append ( I1I1iiI )
if 85 - 85: I1IiiI * OoO0O00
if 63 - 63: I1IiiI - i11iIiiIii
if 4 - 4: OOooOOo + iIii1I11I1II1 / I1IiiI * Ii1I
if 64 - 64: OoOoOO00
if 94 - 94: OOooOOo * OoooooooOO * o0oOOo0O0Ooo / I1Ii111 . II111iiii
class lisp_json ( ) :
def __init__ ( self , name , string ) :
self . json_name = name
self . json_string = string
if 37 - 37: O0 * II111iiii * I1IiiI - O0 - I11i / i1IIi
if 27 - 27: i11iIiiIii + iIii1I11I1II1
def add ( self ) :
self . delete ( )
lisp_json_list [ self . json_name ] = self
if 15 - 15: oO0o
if 69 - 69: II111iiii * O0 . ooOoO0o * IiII
def delete ( self ) :
if ( lisp_json_list . has_key ( self . json_name ) ) :
del ( lisp_json_list [ self . json_name ] )
lisp_json_list [ self . json_name ] = None
if 25 - 25: I11i - I1ii11iIi11i . I1Ii111 . OoooooooOO
if 4 - 4: IiII * OoO0O00 % I1ii11iIi11i * Ii1I . iII111i
if 41 - 41: OoooooooOO % I11i . O0 + I1Ii111
def print_json ( self , html ) :
OOo0oOoOOO0oo = self . json_string
iiO0 = "***"
if ( html ) : iiO0 = red ( iiO0 , html )
iIioOooO = iiO0 + self . json_string + iiO0
if ( self . valid_json ( ) ) : return ( OOo0oOoOOO0oo )
return ( iIioOooO )
if 33 - 33: i1IIi / o0oOOo0O0Ooo . OoooooooOO
if 8 - 8: I1IiiI * OOooOOo * IiII / I1IiiI + i1IIi
def valid_json ( self ) :
try :
json . loads ( self . json_string )
except :
return ( False )
if 11 - 11: I11i * Ii1I * I1IiiI - I1IiiI % OoooooooOO
return ( True )
class lisp_stats ( ) :
def __init__ ( self ) :
self . packet_count = 0
self . byte_count = 0
self . last_rate_check = 0
self . last_packet_count = 0
self . last_byte_count = 0
self . last_increment = None
if 94 - 94: OoOoOO00 + OoooooooOO
if 92 - 92: i11iIiiIii * IiII * I1IiiI - oO0o / iII111i
def increment ( self , octets ) :
self . packet_count += 1
self . byte_count += octets
self . last_increment = lisp_get_timestamp ( )
if 1 - 1: ooOoO0o - OoO0O00 - o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i - I1Ii111
if 78 - 78: Oo0Ooo
def recent_packet_sec ( self ) :
if ( self . last_increment == None ) : return ( False )
iIIiI1iiI = time . time ( ) - self . last_increment
return ( iIIiI1iiI <= 1 )
if 27 - 27: Ii1I / oO0o - Ii1I / iIii1I11I1II1 + o0oOOo0O0Ooo . Ii1I
if 79 - 79: Ii1I % O0 * OOooOOo
def recent_packet_min ( self ) :
if ( self . last_increment == None ) : return ( False )
iIIiI1iiI = time . time ( ) - self . last_increment
return ( iIIiI1iiI <= 60 )
if 41 - 41: I1ii11iIi11i . OoooooooOO * I1ii11iIi11i - oO0o
if 40 - 40: I1IiiI % OoO0O00 + i11iIiiIii / oO0o
def stat_colors ( self , c1 , c2 , html ) :
if ( self . recent_packet_sec ( ) ) :
return ( green_last_sec ( c1 ) , green_last_sec ( c2 ) )
if 98 - 98: oO0o + iIii1I11I1II1 . ooOoO0o / I1ii11iIi11i
if ( self . recent_packet_min ( ) ) :
return ( green_last_min ( c1 ) , green_last_min ( c2 ) )
if 77 - 77: OoOoOO00 / Oo0Ooo * OoOoOO00 % I1IiiI . II111iiii % OoO0O00
return ( c1 , c2 )
if 38 - 38: iII111i - OoO0O00 / i1IIi + ooOoO0o . ooOoO0o . iII111i
if 37 - 37: iIii1I11I1II1 * OoOoOO00 . OoOoOO00 + OoooooooOO + OoO0O00
def normalize ( self , count ) :
count = str ( count )
Iii1IIiii1iii1i = len ( count )
if ( Iii1IIiii1iii1i > 12 ) :
count = count [ 0 : - 10 ] + "." + count [ - 10 : - 7 ] + "T"
return ( count )
if 9 - 9: II111iiii / ooOoO0o - OOooOOo
if ( Iii1IIiii1iii1i > 9 ) :
count = count [ 0 : - 9 ] + "." + count [ - 9 : - 7 ] + "B"
return ( count )
if 57 - 57: I1ii11iIi11i
if ( Iii1IIiii1iii1i > 6 ) :
count = count [ 0 : - 6 ] + "." + count [ - 6 ] + "M"
return ( count )
if 82 - 82: o0oOOo0O0Ooo / O0 / iII111i / II111iiii - I11i % o0oOOo0O0Ooo
return ( count )
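#
# normalize() above renders large counters with a suffix: values with
# more than 6 digits get "M", more than 9 get "B", and more than 12 get
# "T". For example 1234567 becomes "1.2M" and 9876543210 becomes
# "9.87B".
#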
    def get_stats(self, summary, html):
        last_rate_check = self.last_rate_check
        last_packet_count = self.last_packet_count
        last_byte_count = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        #
        # Compute packet-rate (pps) and bit-rate (Mbps) since the last call.
        #
        interval = self.last_rate_check - last_rate_check
        if (interval == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int((self.packet_count - last_packet_count) /
                interval)
            bit_rate = (self.byte_count - last_byte_count) / interval
            bit_rate = (bit_rate * 8) / 1000000
            bit_rate = round(bit_rate, 2)

        #
        # Normalize printed counters into M/B/T units.
        #
        packet_count = self.normalize(self.packet_count)
        byte_count = self.normalize(self.byte_count)

        if (summary):
            separator = "<br>" if html else ""
            packet_count, byte_count = self.stat_colors(packet_count,
                byte_count, html)
            hover = "packet-count: {}{}byte-count: {}".format(packet_count,
                separator, byte_count)
            stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)
            if (html != ""): stats = lisp_span(hover, stats)
        else:
            packet_rate = str(packet_rate)
            bit_rate = str(bit_rate)
            if (html):
                packet_count = lisp_print_cour(packet_count)
                packet_rate = lisp_print_cour(packet_rate)
                byte_count = lisp_print_cour(byte_count)
                bit_rate = lisp_print_cour(bit_rate)
            separator = "<br>" if html else ", "

            stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " +
                "{}{}bit-rate: {} mbps").format(packet_count, separator,
                packet_rate, separator, byte_count, separator, bit_rate)
        return(stats)
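    # Illustrative non-summary output (format taken from the string above,
    # values made up):
    #   "packet-count: 1.2M, packet-rate: 150 pps, byte-count: 1.80B,
    #    bit-rate: 2.4 mbps"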
lisp_decap_stats = {
    "good-packets": lisp_stats(), "ICV-error": lisp_stats(),
    "checksum-error": lisp_stats(), "lisp-header-error": lisp_stats(),
    "no-decrypt-key": lisp_stats(), "bad-inner-version": lisp_stats(),
    "outer-header-error": lisp_stats()
}
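#
# Assumption worth stating here (not spelled out at this point in the file):
# each entry above is a per-error-class lisp_stats() counter, so the decap
# path can account a dropped or accepted packet with something like:
#
#   lisp_decap_stats["checksum-error"].increment(len(packet))
#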
class lisp_rloc():
    def __init__(self, recurse=True):
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_name = None
        self.interface = None
        self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.translated_port = 0
        self.priority = 255
        self.weight = 0
        self.mpriority = 255
        self.mweight = 0
        self.uptime = 0
        self.state = LISP_RLOC_UP_STATE
        self.last_state_change = None
        self.rle_name = None
        self.elp_name = None
        self.geo_name = None
        self.json_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.stats = lisp_stats()
        self.last_rloc_probe = None
        self.last_rloc_probe_reply = None
        self.rloc_probe_rtt = -1
        self.recent_rloc_probe_rtts = [-1, -1, -1]
        self.rloc_probe_hops = "?/?"
        self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
        self.last_rloc_probe_nonce = 0
        self.echo_nonce_capable = False
        self.map_notify_requested = False
        self.rloc_next_hop = None
        self.next_rloc = None

        if (recurse == False): return

        #
        # When the system has more than one default-route next-hop, chain a
        # copy of this lisp_rloc per additional next-hop via next_rloc so
        # each next-hop keeps its own state.
        #
        next_hops = lisp_get_default_route_next_hops()
        if (next_hops == [] or len(next_hops) == 1): return

        self.rloc_next_hop = next_hops[0]
        prev = self
        for nh in next_hops[1::]:
            rloc = lisp_rloc(False)
            rloc = copy.deepcopy(self)
            rloc.rloc_next_hop = nh
            prev.next_rloc = rloc
            prev = rloc
    def up_state(self):
        return(self.state == LISP_RLOC_UP_STATE)

    def unreach_state(self):
        return(self.state == LISP_RLOC_UNREACH_STATE)

    def no_echoed_nonce_state(self):
        return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)

    def down_state(self):
        return(self.state in [LISP_RLOC_DOWN_STATE,
            LISP_RLOC_ADMIN_DOWN_STATE])

    def print_state(self):
        if (self.state is LISP_RLOC_UNKNOWN_STATE): return("unknown-state")
        if (self.state is LISP_RLOC_UP_STATE): return("up-state")
        if (self.state is LISP_RLOC_DOWN_STATE): return("down-state")
        if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state is LISP_RLOC_UNREACH_STATE): return("unreach-state")
        if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
        return("invalid-state")
    def print_rloc(self, indent):
        uptime = lisp_print_elapsed(self.uptime)
        lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
            red(self.rloc.print_address(), False), uptime, self.print_state(),
            self.priority, self.weight, self.mpriority, self.mweight))

    def print_rloc_name(self, cour=False):
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))
if 52 - 52: I1ii11iIi11i
if 4 - 4: Ii1I - iII111i + i1IIi - I1Ii111 / iII111i . Oo0Ooo
def store_rloc_from_record ( self , rloc_record , nonce , source ) :
Iiiii = LISP_DATA_PORT
self . rloc . copy_address ( rloc_record . rloc )
self . rloc_name = rloc_record . rloc_name
Oo0o0o0oo = self . rloc
if ( Oo0o0o0oo . is_null ( ) == False ) :
iiI = lisp_get_nat_info ( Oo0o0o0oo , self . rloc_name )
if ( iiI ) :
Iiiii = iiI . port
oOOooO0OO = lisp_nat_state_info [ self . rloc_name ] [ 0 ]
ooOOo0o = Oo0o0o0oo . print_address_no_iid ( )
ooOOo00o0ooO = red ( ooOOo0o , False )
iIiII1ii1i1 = "" if self . rloc_name == None else blue ( self . rloc_name , False )
if ( iiI . timed_out ( ) ) :
lprint ( ( " Matched stored NAT state timed out for " + "RLOC {}:{}, {}" ) . format ( ooOOo00o0ooO , Iiiii , iIiII1ii1i1 ) )
if 68 - 68: Oo0Ooo
if 20 - 20: IiII + i11iIiiIii * OOooOOo
iiI = None if ( iiI == oOOooO0OO ) else oOOooO0OO
if ( iiI and iiI . timed_out ( ) ) :
Iiiii = iiI . port
ooOOo00o0ooO = red ( iiI . address , False )
lprint ( ( " Youngest stored NAT state timed out " + " for RLOC {}:{}, {}" ) . format ( ooOOo00o0ooO , Iiiii ,
# I1Ii111 + O0
iIiII1ii1i1 ) )
iiI = None
if ( iiI ) :
if ( iiI . address != ooOOo0o ) :
lprint ( "RLOC conflict, RLOC-record {}, NAT state {}" . format ( ooOOo00o0ooO , red ( iiI . address , False ) ) )
if 95 - 95: IiII + o0oOOo0O0Ooo - o0oOOo0O0Ooo
self . rloc . store_address ( iiI . address )
if 83 - 83: ooOoO0o
ooOOo00o0ooO = red ( iiI . address , False )
Iiiii = iiI . port
lprint ( " Use NAT translated RLOC {}:{} for {}" . format ( ooOOo00o0ooO , Iiiii , iIiII1ii1i1 ) )
if 59 - 59: I1ii11iIi11i
self . store_translated_rloc ( Oo0o0o0oo , Iiiii )
self . geo = rloc_record . geo
self . elp = rloc_record . elp
self . json = rloc_record . json
self . rle = rloc_record . rle
if ( self . rle ) :
for I1I1iiI in self . rle . rle_nodes :
i1OOO = I1I1iiI . rloc_name
iiI = lisp_get_nat_info ( I1I1iiI . address , i1OOO )
if ( iiI == None ) : continue
if 40 - 40: OoooooooOO
Iiiii = iiI . port
OOO0Oo0Oo = i1OOO
if ( OOO0Oo0Oo ) : OOO0Oo0Oo = blue ( i1OOO , False )
if 14 - 14: o0oOOo0O0Ooo / OOooOOo . OoOoOO00 % iIii1I11I1II1 % OoOoOO00
lprint ( ( " Store translated encap-port {} for RLE-" + "node {}, rloc-name '{}'" ) . format ( Iiiii ,
# Oo0Ooo / II111iiii - Oo0Ooo - OoOoOO00 - OoOoOO00 / Ii1I
I1I1iiI . address . print_address_no_iid ( ) , OOO0Oo0Oo ) )
I1I1iiI . translated_port = Iiiii
self . priority = rloc_record . priority
self . mpriority = rloc_record . mpriority
self . weight = rloc_record . weight
self . mweight = rloc_record . mweight
if ( rloc_record . reach_bit and rloc_record . local_bit and
rloc_record . probe_bit == False ) : self . state = LISP_RLOC_UP_STATE
o0Oo = source . is_exact_match ( rloc_record . rloc ) if source != None else None
if 1 - 1: II111iiii + O0 % oO0o % II111iiii / OOooOOo
if ( rloc_record . keys != None and o0Oo ) :
Iiii11 = rloc_record . keys [ 1 ]
if ( Iiii11 != None ) :
ooOOo0o = rloc_record . rloc . print_address_no_iid ( ) + ":" + str ( Iiiii )
if 59 - 59: I1IiiI
Iiii11 . add_key_by_rloc ( ooOOo0o , True )
lprint ( " Store encap-keys for nonce 0x{}, RLOC {}" . format ( lisp_hex_string ( nonce ) , red ( ooOOo0o , False ) ) )
return ( Iiiii )
if 36 - 36: IiII
if 53 - 53: OoooooooOO / I1IiiI % I11i + Oo0Ooo
    def store_translated_rloc(self, rloc, port):
        self.rloc.copy_address(rloc)
        self.translated_rloc.copy_address(rloc)
        self.translated_port = port

    def is_rloc_translated(self):
        return(self.translated_rloc.is_null() == False)

    def rloc_exists(self):
        if (self.rloc.is_null() == False): return(True)
        if (self.rle_name or self.geo_name or self.elp_name or
            self.json_name): return(False)
        return(True)
    def is_rtr(self):
        #
        # An RTR RLOC is encoded with priority 254, mpriority 255 and zero
        # weights; that encoding is what this predicate tests for.
        #
        return(self.priority == 254 and self.mpriority == 255 and
            self.weight == 0 and self.mweight == 0)
    def print_state_change(self, new_state):
        current_state = self.print_state()
        string = "{} -> {}".format(current_state, new_state)
        if (new_state == "up" and self.unreach_state()):
            string = bold(string, False)
        return(string)
    def print_rloc_probe_rtt(self):
        if (self.rloc_probe_rtt == -1): return("none")
        return(self.rloc_probe_rtt)
    def print_recent_rloc_probe_rtts(self):
        rtts = str(self.recent_rloc_probe_rtts)
        rtts = rtts.replace("-1", "?")
        return(rtts)
    def compute_rloc_probe_rtt(self):
        last_rtt = self.rloc_probe_rtt
        self.rloc_probe_rtt = -1
        if (self.last_rloc_probe_reply == None): return
        if (self.last_rloc_probe == None): return
        self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
        self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
        recent = self.recent_rloc_probe_rtts
        self.recent_rloc_probe_rtts = [last_rtt] + recent[0:-1]
if 50 - 50: i1IIi . iIii1I11I1II1 % OoO0O00
if 45 - 45: OoooooooOO . O0 * oO0o + IiII
def print_rloc_probe_hops ( self ) :
return ( self . rloc_probe_hops )
if 18 - 18: II111iiii . O0 - I11i / I11i
if 71 - 71: OoOoOO00 + iIii1I11I1II1 - II111iiii / i1IIi
def print_recent_rloc_probe_hops ( self ) :
I111II = str ( self . recent_rloc_probe_hops )
return ( I111II )
if 22 - 22: I1Ii111 - OOooOOo * i1IIi
if 88 - 88: ooOoO0o + iIii1I11I1II1 + OoO0O00 * I1Ii111 + oO0o
def store_rloc_probe_hops ( self , to_hops , from_ttl ) :
if ( to_hops == 0 ) :
to_hops = "?"
elif ( to_hops < LISP_RLOC_PROBE_TTL / 2 ) :
to_hops = "!"
else :
to_hops = str ( LISP_RLOC_PROBE_TTL - to_hops )
if 39 - 39: ooOoO0o - oO0o + OoOoOO00 - oO0o - Ii1I % I1Ii111
if ( from_ttl < LISP_RLOC_PROBE_TTL / 2 ) :
O000o00O0OOoo = "!"
else :
O000o00O0OOoo = str ( LISP_RLOC_PROBE_TTL - from_ttl )
if 32 - 32: I1Ii111 . I1IiiI
if 78 - 78: OoOoOO00 . I1ii11iIi11i / o0oOOo0O0Ooo
i1OooO00oO00o = self . rloc_probe_hops
self . rloc_probe_hops = to_hops + "/" + O000o00O0OOoo
oOOO0o0O = self . recent_rloc_probe_hops
self . recent_rloc_probe_hops = [ i1OooO00oO00o ] + oOOO0o0O [ 0 : - 1 ]
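    # The stored string is "<to-hops>/<from-hops>": each half is
    # LISP_RLOC_PROBE_TTL minus a received TTL value, "?" when the value is
    # unknown, and "!" when the TTL is below half of LISP_RLOC_PROBE_TTL.
    # For example, assuming LISP_RLOC_PROBE_TTL were 64, a reply arriving
    # with TTL 61 would contribute "3" to the from-hops half.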
if 57 - 57: IiII % O0 * I1ii11iIi11i
if 61 - 61: O0
def process_rloc_probe_reply ( self , nonce , eid , group , hop_count , ttl ) :
Oo0o0o0oo = self
while ( True ) :
if ( Oo0o0o0oo . last_rloc_probe_nonce == nonce ) : break
Oo0o0o0oo = Oo0o0o0oo . next_rloc
if ( Oo0o0o0oo == None ) :
lprint ( " No matching nonce state found for nonce 0x{}" . format ( lisp_hex_string ( nonce ) ) )
if 51 - 51: I1Ii111 - I11i % o0oOOo0O0Ooo * Oo0Ooo - oO0o + II111iiii
return
Oo0o0o0oo . last_rloc_probe_reply = lisp_get_timestamp ( )
Oo0o0o0oo . compute_rloc_probe_rtt ( )
oOoOOOo0oo0 = Oo0o0o0oo . print_state_change ( "up" )
if ( Oo0o0o0oo . state != LISP_RLOC_UP_STATE ) :
lisp_update_rtr_updown ( Oo0o0o0oo . rloc , True )
Oo0o0o0oo . state = LISP_RLOC_UP_STATE
Oo0o0o0oo . last_state_change = lisp_get_timestamp ( )
ooooOoo000O = lisp_map_cache . lookup_cache ( eid , True )
if ( ooooOoo000O ) : lisp_write_ipc_map_cache ( True , ooooOoo000O )
if 87 - 87: iII111i - OoO0O00 . Ii1I / ooOoO0o
if 88 - 88: O0 % OOooOOo . iII111i
Oo0o0o0oo . store_rloc_probe_hops ( hop_count , ttl )
if 40 - 40: O0 . Ii1I % IiII % I1ii11iIi11i - OoOoOO00
oo00OO0Oooo = bold ( "RLOC-probe reply" , False )
ooOOo0o = Oo0o0o0oo . rloc . print_address_no_iid ( )
oooOoo = bold ( str ( Oo0o0o0oo . print_rloc_probe_rtt ( ) ) , False )
i111 = ":{}" . format ( self . translated_port ) if self . translated_port != 0 else ""
if 36 - 36: I1Ii111 . OoooooooOO - i1IIi % iII111i - II111iiii * i11iIiiIii
O0o0 = ""
if ( Oo0o0o0oo . rloc_next_hop != None ) :
i1 , oOO0OoOoOoo = Oo0o0o0oo . rloc_next_hop
O0o0 = ", nh {}({})" . format ( oOO0OoOoOoo , i1 )
if 88 - 88: Ii1I * OOooOOo / iII111i % iII111i % o0oOOo0O0Ooo + II111iiii
if 89 - 89: I1IiiI - OoooooooOO / I11i . ooOoO0o
Oo0ooo0Ooo = green ( lisp_print_eid_tuple ( eid , group ) , False )
lprint ( ( " Received {} from {}{} for {}, {}, rtt {}{}, " + "to-ttl/from-ttl {}" ) . format ( oo00OO0Oooo , red ( ooOOo0o , False ) , i111 , Oo0ooo0Ooo ,
# I1ii11iIi11i . Ii1I . iIii1I11I1II1 * I1ii11iIi11i / Ii1I
oOoOOOo0oo0 , oooOoo , O0o0 , str ( hop_count ) + "/" + str ( ttl ) ) )
if 74 - 74: Oo0Ooo * I1Ii111
if ( Oo0o0o0oo . rloc_next_hop == None ) : return
Oo0o0o0oo = None
O00OOO0 = None
while ( True ) :
Oo0o0o0oo = self if Oo0o0o0oo == None else Oo0o0o0oo . next_rloc
if ( Oo0o0o0oo == None ) : break
if ( Oo0o0o0oo . up_state ( ) == False ) : continue
if ( Oo0o0o0oo . rloc_probe_rtt == - 1 ) : continue
if 66 - 66: OoooooooOO / iII111i / I1IiiI % ooOoO0o / OoO0O00 + OOooOOo
if ( O00OOO0 == None ) : O00OOO0 = Oo0o0o0oo
if ( Oo0o0o0oo . rloc_probe_rtt < O00OOO0 . rloc_probe_rtt ) : O00OOO0 = Oo0o0o0oo
if 64 - 64: i1IIi
if 26 - 26: OoOoOO00 / o0oOOo0O0Ooo . OOooOOo + I1IiiI + Ii1I . iII111i
if ( O00OOO0 != None ) :
i1 , oOO0OoOoOoo = O00OOO0 . rloc_next_hop
O0o0 = bold ( "nh {}({})" . format ( oOO0OoOoOoo , i1 ) , False )
lprint ( " Install host-route via best {}" . format ( O0o0 ) )
lisp_install_host_route ( ooOOo0o , None , False )
lisp_install_host_route ( ooOOo0o , oOO0OoOoOoo , True )
def add_to_rloc_probe_list ( self , eid , group ) :
ooOOo0o = self . rloc . print_address_no_iid ( )
Iiiii = self . translated_port
if ( Iiiii != 0 ) : ooOOo0o += ":" + str ( Iiiii )
if 76 - 76: Oo0Ooo + I1IiiI - O0
if ( lisp_rloc_probe_list . has_key ( ooOOo0o ) == False ) :
lisp_rloc_probe_list [ ooOOo0o ] = [ ]
if 58 - 58: IiII * i1IIi . I1IiiI - iII111i
if 73 - 73: Oo0Ooo . OoOoOO00
if ( group . is_null ( ) ) : group . instance_id = 0
for Oo0O , Oo0ooo0Ooo , o0 in lisp_rloc_probe_list [ ooOOo0o ] :
if ( Oo0ooo0Ooo . is_exact_match ( eid ) and o0 . is_exact_match ( group ) ) :
if ( Oo0O == self ) :
if ( lisp_rloc_probe_list [ ooOOo0o ] == [ ] ) :
lisp_rloc_probe_list . pop ( ooOOo0o )
if 50 - 50: IiII / o0oOOo0O0Ooo
return
if 9 - 9: Oo0Ooo - OoO0O00 + iII111i / OoooooooOO
lisp_rloc_probe_list [ ooOOo0o ] . remove ( [ Oo0O , Oo0ooo0Ooo , o0 ] )
break
if 52 - 52: O0
if 34 - 34: OoooooooOO + OoOoOO00 - Oo0Ooo . OOooOOo * iIii1I11I1II1
lisp_rloc_probe_list [ ooOOo0o ] . append ( [ self , eid , group ] )
Oo0o0o0oo = lisp_rloc_probe_list [ ooOOo0o ] [ 0 ] [ 0 ]
if ( Oo0o0o0oo . state == LISP_RLOC_UNREACH_STATE ) :
self . state = LISP_RLOC_UNREACH_STATE
self . last_state_change = lisp_get_timestamp ( )
def delete_from_rloc_probe_list ( self , eid , group ) :
ooOOo0o = self . rloc . print_address_no_iid ( )
Iiiii = self . translated_port
if ( Iiiii != 0 ) : ooOOo0o += ":" + str ( Iiiii )
if ( lisp_rloc_probe_list . has_key ( ooOOo0o ) == False ) : return
if 62 - 62: Ii1I / Oo0Ooo . OoO0O00 - OOooOOo
oOOOOoOO0Oo = [ ]
for iiIIIIiI111 in lisp_rloc_probe_list [ ooOOo0o ] :
if ( iiIIIIiI111 [ 0 ] != self ) : continue
if ( iiIIIIiI111 [ 1 ] . is_exact_match ( eid ) == False ) : continue
if ( iiIIIIiI111 [ 2 ] . is_exact_match ( group ) == False ) : continue
oOOOOoOO0Oo = iiIIIIiI111
break
if 84 - 84: Oo0Ooo * I1Ii111 - o0oOOo0O0Ooo % Ii1I
if ( oOOOOoOO0Oo == [ ] ) : return
if 69 - 69: I11i + OoOoOO00 - i11iIiiIii * O0 % O0
try :
lisp_rloc_probe_list [ ooOOo0o ] . remove ( oOOOOoOO0Oo )
if ( lisp_rloc_probe_list [ ooOOo0o ] == [ ] ) :
lisp_rloc_probe_list . pop ( ooOOo0o )
if 81 - 81: I11i - o0oOOo0O0Ooo % Ii1I / I1Ii111 * II111iiii
except :
return
def print_rloc_probe_state ( self , trailing_linefeed ) :
I1i = ""
Oo0o0o0oo = self
while ( True ) :
Iii111II1I11I = Oo0o0o0oo . last_rloc_probe
if ( Iii111II1I11I == None ) : Iii111II1I11I = 0
IIii = Oo0o0o0oo . last_rloc_probe_reply
if ( IIii == None ) : IIii = 0
oooOoo = Oo0o0o0oo . print_rloc_probe_rtt ( )
o00oOOO = space ( 4 )
if 56 - 56: II111iiii * iIii1I11I1II1 % I1ii11iIi11i
if ( Oo0o0o0oo . rloc_next_hop == None ) :
I1i += "RLOC-Probing:\n"
else :
i1 , oOO0OoOoOoo = Oo0o0o0oo . rloc_next_hop
I1i += "RLOC-Probing for nh {}({}):\n" . format ( oOO0OoOoOoo , i1 )
if 83 - 83: i1IIi . i11iIiiIii / iII111i
if 28 - 28: i1IIi - iII111i + o0oOOo0O0Ooo / Oo0Ooo * oO0o
I1i += ( "{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + "received: {}, rtt {}" ) . format ( o00oOOO , lisp_print_elapsed ( Iii111II1I11I ) ,
# I11i
o00oOOO , lisp_print_elapsed ( IIii ) , oooOoo )
if 42 - 42: OOooOOo * ooOoO0o / i1IIi . i11iIiiIii - oO0o - Ii1I
if ( trailing_linefeed ) : I1i += "\n"
if 5 - 5: i1IIi + II111iiii . ooOoO0o
Oo0o0o0oo = Oo0o0o0oo . next_rloc
if ( Oo0o0o0oo == None ) : break
I1i += "\n"
if 21 - 21: i1IIi
return ( I1i )
if 96 - 96: OoOoOO00 * OoOoOO00 % OoO0O00 * iII111i
if 51 - 51: I1IiiI + i11iIiiIii + iII111i
def get_encap_keys ( self ) :
Iiiii = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 57 - 57: Oo0Ooo . oO0o
ooOOo0o = self . rloc . print_address_no_iid ( ) + ":" + Iiiii
if 52 - 52: IiII % OoO0O00 - OoO0O00 . I1IiiI + OoO0O00 * ooOoO0o
try :
i1iIi = lisp_crypto_keys_by_rloc_encap [ ooOOo0o ]
if ( i1iIi [ 1 ] ) : return ( i1iIi [ 1 ] . encrypt_key , i1iIi [ 1 ] . icv_key )
return ( None , None )
except :
return ( None , None )
if 44 - 44: iIii1I11I1II1 / Ii1I - oO0o % i11iIiiIii
if 65 - 65: I1ii11iIi11i * Oo0Ooo / Ii1I . OOooOOo * iIii1I11I1II1 + Oo0Ooo
if 44 - 44: ooOoO0o * iII111i * IiII % o0oOOo0O0Ooo
def rloc_recent_rekey ( self ) :
Iiiii = "4341" if self . translated_port == 0 else str ( self . translated_port )
if 45 - 45: OoOoOO00 % o0oOOo0O0Ooo + IiII / i11iIiiIii
ooOOo0o = self . rloc . print_address_no_iid ( ) + ":" + Iiiii
if 29 - 29: iIii1I11I1II1 . OoO0O00 / I1IiiI
try :
Iiii11 = lisp_crypto_keys_by_rloc_encap [ ooOOo0o ] [ 1 ]
if ( Iiii11 == None ) : return ( False )
if ( Iiii11 . last_rekey == None ) : return ( True )
return ( time . time ( ) - Iiii11 . last_rekey < 1 )
except :
return ( False )
class lisp_mapping():
    def __init__(self, eid, group, rloc_set):
        self.eid = eid
        if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = group
        if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_set = rloc_set
        self.best_rloc_set = []
        self.build_best_rloc_set()
        self.uptime = lisp_get_timestamp()
        self.action = LISP_NO_ACTION
        self.expires = None
        self.map_cache_ttl = None
        self.last_refresh_time = self.uptime
        self.source_cache = None
        self.map_replies_sent = 0
        self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.use_mr_name = "all"
        self.use_ms_name = "all"
        self.stats = lisp_stats()
        self.dynamic_eids = None
        self.checkpoint_entry = False
        self.secondary_iid = None
        self.signature_eid = False
        self.gleaned = False
def print_mapping ( self , eid_indent , rloc_indent ) :
OOOO0O00o = lisp_print_elapsed ( self . uptime )
i1i11Ii1 = "" if self . group . is_null ( ) else ", group {}" . format ( self . group . print_prefix ( ) )
if 72 - 72: i11iIiiIii * OoOoOO00 % oO0o / I1Ii111
lprint ( "{}eid {}{}, uptime {}, {} rlocs:" . format ( eid_indent ,
green ( self . eid . print_prefix ( ) , False ) , i1i11Ii1 , OOOO0O00o ,
len ( self . rloc_set ) ) )
for Oo0o0o0oo in self . rloc_set : Oo0o0o0oo . print_rloc ( rloc_indent )
if 9 - 9: iIii1I11I1II1 . IiII
if 42 - 42: i1IIi / Ii1I * I1ii11iIi11i
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 9 - 9: I11i % i1IIi / i1IIi / OoO0O00
if 46 - 46: I1Ii111 * II111iiii + II111iiii * O0 % II111iiii
    def print_ttl(self):
        ttl = self.map_cache_ttl
        if (ttl == None): return("forever")

        if (ttl >= 3600):
            if ((ttl % 3600) == 0):
                ttl = str(ttl / 3600) + " hours"
            else:
                ttl = str(ttl / 60) + " mins"
        elif (ttl >= 60):
            if ((ttl % 60) == 0):
                ttl = str(ttl / 60) + " mins"
            else:
                ttl = str(ttl) + " secs"
        else:
            ttl = str(ttl) + " secs"
        return(ttl)
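    # Illustrative values for the seconds-based conversion above: a
    # map_cache_ttl of 7200 prints as "2 hours", 5400 as "90 mins",
    # 120 as "2 mins", and 45 as "45 secs".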
if 68 - 68: I11i / iII111i - IiII . iIii1I11I1II1 / o0oOOo0O0Ooo
if 54 - 54: II111iiii * I1IiiI
    def has_ttl_elapsed(self):
        if (self.map_cache_ttl == None): return(False)
        elapsed = time.time() - self.last_refresh_time
        if (elapsed >= self.map_cache_ttl): return(True)

        #
        # Also treat the TTL as elapsed once 90% of it has passed so the
        # entry gets refreshed a bit early.
        #
        refresh_time = self.map_cache_ttl - (self.map_cache_ttl / 10)
        if (elapsed >= refresh_time): return(True)
        return(False)
if 69 - 69: OoO0O00 - ooOoO0o / IiII . Ii1I / Ii1I + o0oOOo0O0Ooo
if 9 - 9: IiII % I11i . I1Ii111 - I1ii11iIi11i + i11iIiiIii / I1IiiI
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
iIIiI1iiI = time . time ( ) - self . stats . last_increment
return ( iIIiI1iiI <= 60 )
if 12 - 12: iII111i . I1IiiI * OoooooooOO
if 80 - 80: i11iIiiIii . OoO0O00 - Oo0Ooo . OoO0O00
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
if 3 - 3: oO0o - I1IiiI - OoOoOO00 * I1Ii111 * i11iIiiIii . II111iiii
if 22 - 22: o0oOOo0O0Ooo
def sort_rloc_set ( self ) :
self . rloc_set . sort ( key = operator . attrgetter ( 'rloc.address' ) )
if 45 - 45: I1Ii111 + OoooooooOO + o0oOOo0O0Ooo * II111iiii
if 12 - 12: I1ii11iIi11i / O0
def delete_rlocs_from_rloc_probe_list ( self ) :
for Oo0o0o0oo in self . best_rloc_set :
Oo0o0o0oo . delete_from_rloc_probe_list ( self . eid , self . group )
def build_best_rloc_set ( self ) :
ii1II1 = self . best_rloc_set
self . best_rloc_set = [ ]
if ( self . rloc_set == None ) : return
oOO00oOOOoO = 256
for Oo0o0o0oo in self . rloc_set :
if ( Oo0o0o0oo . up_state ( ) ) : oOO00oOOOoO = min ( Oo0o0o0oo . priority , oOO00oOOOoO )
for Oo0o0o0oo in self . rloc_set :
if ( Oo0o0o0oo . priority <= oOO00oOOOoO ) :
if ( Oo0o0o0oo . unreach_state ( ) and Oo0o0o0oo . last_rloc_probe == None ) :
Oo0o0o0oo . last_rloc_probe = lisp_get_timestamp ( )
if 60 - 60: ooOoO0o - II111iiii - iIii1I11I1II1
self . best_rloc_set . append ( Oo0o0o0oo )
for Oo0o0o0oo in ii1II1 :
if ( Oo0o0o0oo . priority < oOO00oOOOoO ) : continue
Oo0o0o0oo . delete_from_rloc_probe_list ( self . eid , self . group )
if 23 - 23: iIii1I11I1II1
for Oo0o0o0oo in self . best_rloc_set :
if ( Oo0o0o0oo . rloc . is_null ( ) ) : continue
Oo0o0o0oo . add_to_rloc_probe_list ( self . eid , self . group )
def select_rloc ( self , lisp_packet , ipc_socket ) :
oOo = lisp_packet . packet
OOOO0oooO = lisp_packet . inner_version
OOOOO000oo0 = len ( self . best_rloc_set )
if ( OOOOO000oo0 == 0 ) :
self . stats . increment ( len ( oOo ) )
return ( [ None , None , None , self . action , None , None ] )
if 56 - 56: I1Ii111 . I1ii11iIi11i - o0oOOo0O0Ooo / i11iIiiIii * iII111i / iIii1I11I1II1
if 49 - 49: I1IiiI / iIii1I11I1II1
Ii111Iii1ii = 4 if lisp_load_split_pings else 0
ooo000 = lisp_packet . hash_ports ( )
if ( OOOO0oooO == 4 ) :
for II11iIII1i1I in range ( 8 + Ii111Iii1ii ) :
ooo000 = ooo000 ^ struct . unpack ( "B" , oOo [ II11iIII1i1I + 12 ] ) [ 0 ]
if 16 - 16: O0
elif ( OOOO0oooO == 6 ) :
for II11iIII1i1I in range ( 0 , 32 + Ii111Iii1ii , 4 ) :
ooo000 = ooo000 ^ struct . unpack ( "I" , oOo [ II11iIII1i1I + 8 : II11iIII1i1I + 12 ] ) [ 0 ]
if 61 - 61: OoOoOO00 * OOooOOo
ooo000 = ( ooo000 >> 16 ) + ( ooo000 & 0xffff )
ooo000 = ( ooo000 >> 8 ) + ( ooo000 & 0xff )
else :
for II11iIII1i1I in range ( 0 , 12 + Ii111Iii1ii , 4 ) :
ooo000 = ooo000 ^ struct . unpack ( "I" , oOo [ II11iIII1i1I : II11iIII1i1I + 4 ] ) [ 0 ]
if 3 - 3: I1IiiI + Oo0Ooo / I1Ii111
if 17 - 17: i11iIiiIii / Oo0Ooo . o0oOOo0O0Ooo / I1IiiI . OOooOOo
if 10 - 10: I11i - OoOoOO00
if ( lisp_data_plane_logging ) :
IIIii = [ ]
for Oo0O in self . best_rloc_set :
if ( Oo0O . rloc . is_null ( ) ) : continue
IIIii . append ( [ Oo0O . rloc . print_address_no_iid ( ) , Oo0O . print_state ( ) ] )
if 100 - 100: oO0o * IiII * iII111i % iIii1I11I1II1
dprint ( "Packet hash {}, index {}, best-rloc-list: {}" . format ( hex ( ooo000 ) , ooo000 % OOOOO000oo0 , red ( str ( IIIii ) , False ) ) )
Oo0o0o0oo = self . best_rloc_set [ ooo000 % OOOOO000oo0 ]
IiIii1i11i1 = lisp_get_echo_nonce ( Oo0o0o0oo . rloc , None )
if ( IiIii1i11i1 ) :
IiIii1i11i1 . change_state ( Oo0o0o0oo )
if ( Oo0o0o0oo . no_echoed_nonce_state ( ) ) :
IiIii1i11i1 . request_nonce_sent = None
if ( Oo0o0o0oo . up_state ( ) == False ) :
iIIiIIiI = ooo000 % OOOOO000oo0
oo0OOo0O = ( iIIiIIiI + 1 ) % OOOOO000oo0
while ( oo0OOo0O != iIIiIIiI ) :
Oo0o0o0oo = self . best_rloc_set [ oo0OOo0O ]
if ( Oo0o0o0oo . up_state ( ) ) : break
oo0OOo0O = ( oo0OOo0O + 1 ) % OOOOO000oo0
if 59 - 59: OoooooooOO + I11i . oO0o
if ( oo0OOo0O == iIIiIIiI ) :
self . build_best_rloc_set ( )
return ( [ None , None , None , None , None , None ] )
Oo0o0o0oo . stats . increment ( len ( oOo ) )
if ( Oo0o0o0oo . rle_name and Oo0o0o0oo . rle == None ) :
if ( lisp_rle_list . has_key ( Oo0o0o0oo . rle_name ) ) :
Oo0o0o0oo . rle = lisp_rle_list [ Oo0o0o0oo . rle_name ]
if 24 - 24: Oo0Ooo / OoO0O00 + i11iIiiIii
if 81 - 81: i11iIiiIii . iIii1I11I1II1 - OoooooooOO
if ( Oo0o0o0oo . rle ) : return ( [ None , None , None , None , Oo0o0o0oo . rle , None ] )
if ( Oo0o0o0oo . elp and Oo0o0o0oo . elp . use_elp_node ) :
return ( [ Oo0o0o0oo . elp . use_elp_node . address , None , None , None , None ,
None ] )
iIIiII1iIII1i = None if ( Oo0o0o0oo . rloc . is_null ( ) ) else Oo0o0o0oo . rloc
Iiiii = Oo0o0o0oo . translated_port
O0oo0oo0 = self . action if ( iIIiII1iIII1i == None ) else None
i11III1I = None
if ( IiIii1i11i1 and IiIii1i11i1 . request_nonce_timeout ( ) == False ) :
i11III1I = IiIii1i11i1 . get_request_or_echo_nonce ( ipc_socket , iIIiII1iIII1i )
return ( [ iIIiII1iIII1i , Iiiii , i11III1I , O0oo0oo0 , None , Oo0o0o0oo ] )
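        # Every exit from select_rloc() returns the same 6-slot list:
        # [rloc-address, translated-port, nonce, action, rle, rloc-entry],
        # with None in any slot that does not apply to the chosen path.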
if 38 - 38: ooOoO0o . OoooooooOO - II111iiii * i11iIiiIii / i1IIi . OoooooooOO
if 51 - 51: oO0o - I1ii11iIi11i + I1ii11iIi11i
def do_rloc_sets_match ( self , rloc_address_set ) :
if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
for O0OO0O in self . rloc_set :
for Oo0o0o0oo in rloc_address_set :
if ( Oo0o0o0oo . is_exact_match ( O0OO0O . rloc ) == False ) : continue
Oo0o0o0oo = None
break
if 72 - 72: I1IiiI % II111iiii % iII111i / OoOoOO00
if ( Oo0o0o0oo == rloc_address_set [ - 1 ] ) : return ( False )
if 96 - 96: OoOoOO00 % Ii1I
return ( True )
if 50 - 50: IiII - II111iiii
if 10 - 10: OoooooooOO % Ii1I * OOooOOo + IiII * oO0o
def get_rloc ( self , rloc ) :
for O0OO0O in self . rloc_set :
Oo0O = O0OO0O . rloc
if ( rloc . is_exact_match ( Oo0O ) ) : return ( O0OO0O )
if 13 - 13: II111iiii
return ( None )
if 14 - 14: i11iIiiIii . IiII
if 70 - 70: Oo0Ooo * OOooOOo + I1Ii111 % OoOoOO00 / O0
def get_rloc_by_interface ( self , interface ) :
for O0OO0O in self . rloc_set :
if ( O0OO0O . interface == interface ) : return ( O0OO0O )
if 23 - 23: O0 * oO0o / I1IiiI + i1IIi * O0 % oO0o
return ( None )
if 11 - 11: I1Ii111 . OoooooooOO * iIii1I11I1II1 / I1ii11iIi11i - ooOoO0o . iII111i
if 71 - 71: i11iIiiIii + I11i / i11iIiiIii % Oo0Ooo / iIii1I11I1II1 * OoO0O00
def add_db ( self ) :
if ( self . group . is_null ( ) ) :
lisp_db_for_lookups . add_cache ( self . eid , self )
else :
iIiIIi1i = lisp_db_for_lookups . lookup_cache ( self . group , True )
if ( iIiIIi1i == None ) :
iIiIIi1i = lisp_mapping ( self . group , self . group , [ ] )
lisp_db_for_lookups . add_cache ( self . group , iIiIIi1i )
if 49 - 49: iII111i + OoOoOO00
iIiIIi1i . add_source_entry ( self )
def add_cache ( self , do_ipc = True ) :
if ( self . group . is_null ( ) ) :
lisp_map_cache . add_cache ( self . eid , self )
if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
else :
ooooOoo000O = lisp_map_cache . lookup_cache ( self . group , True )
if ( ooooOoo000O == None ) :
ooooOoo000O = lisp_mapping ( self . group , self . group , [ ] )
ooooOoo000O . eid . copy_address ( self . group )
ooooOoo000O . group . copy_address ( self . group )
lisp_map_cache . add_cache ( self . group , ooooOoo000O )
if 16 - 16: i1IIi
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( ooooOoo000O . group )
ooooOoo000O . add_source_entry ( self )
if 88 - 88: OOooOOo
if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
if 79 - 79: oO0o
if 52 - 52: oO0o + OoO0O00 / OoooooooOO - iIii1I11I1II1 / iII111i - oO0o
def delete_cache ( self ) :
self . delete_rlocs_from_rloc_probe_list ( )
lisp_write_ipc_map_cache ( False , self )
if 68 - 68: I1IiiI - OoOoOO00 - iIii1I11I1II1 % i11iIiiIii * OoOoOO00 * OoO0O00
if ( self . group . is_null ( ) ) :
lisp_map_cache . delete_cache ( self . eid )
if ( lisp_program_hardware ) :
OOO0000o = self . eid . print_prefix_no_iid ( )
os . system ( "ip route delete {}" . format ( OOO0000o ) )
if 85 - 85: oO0o * I1Ii111 * OoooooooOO % i11iIiiIii . Ii1I % i1IIi
else :
ooooOoo000O = lisp_map_cache . lookup_cache ( self . group , True )
if ( ooooOoo000O == None ) : return
if 40 - 40: Oo0Ooo
II1ii11II1 = ooooOoo000O . lookup_source_cache ( self . eid , True )
if ( II1ii11II1 == None ) : return
if 52 - 52: OoooooooOO
ooooOoo000O . source_cache . delete_cache ( self . eid )
if ( ooooOoo000O . source_cache . cache_size ( ) == 0 ) :
lisp_map_cache . delete_cache ( self . group )
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
if 68 - 68: ooOoO0o
if 53 - 53: i11iIiiIii / OoOoOO00 % o0oOOo0O0Ooo / IiII
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 88 - 88: ooOoO0o . i1IIi
if 21 - 21: OoO0O00 * I1ii11iIi11i + I1ii11iIi11i
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
if 36 - 36: Ii1I . OOooOOo * iIii1I11I1II1 - i1IIi
if 38 - 38: Oo0Ooo . o0oOOo0O0Ooo % oO0o / i11iIiiIii * OoO0O00 % OoOoOO00
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
II1 = "," + str ( self . secondary_iid )
return ( prefix . replace ( II1 , II1 + "*" ) )
if 18 - 18: OOooOOo
if 12 - 12: I1Ii111 % II111iiii / o0oOOo0O0Ooo - iIii1I11I1II1 + II111iiii
def increment_decap_stats ( self , packet ) :
Iiiii = packet . udp_dport
if ( Iiiii == LISP_DATA_PORT ) :
Oo0o0o0oo = self . get_rloc ( packet . outer_dest )
else :
for Oo0o0o0oo in self . rloc_set :
if ( Oo0o0o0oo . translated_port != 0 ) : break
if 68 - 68: I1Ii111 / iIii1I11I1II1
if 8 - 8: ooOoO0o * IiII * OOooOOo / I1IiiI
if ( Oo0o0o0oo != None ) : Oo0o0o0oo . stats . increment ( len ( packet . packet ) )
self . stats . increment ( len ( packet . packet ) )
if 40 - 40: i11iIiiIii + OoooooooOO
if 2 - 2: o0oOOo0O0Ooo * OoO0O00
def rtrs_in_rloc_set ( self ) :
for Oo0o0o0oo in self . rloc_set :
if ( Oo0o0o0oo . is_rtr ( ) ) : return ( True )
if 88 - 88: Oo0Ooo + oO0o + iII111i
return ( False )
class lisp_dynamic_eid():
    def __init__(self):
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.interface = None
        self.last_packet = None
        self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT

    def get_timeout(self, interface):
        try:
            intf = lisp_myinterfaces[interface]
            self.timeout = intf.dynamic_eid_timeout
        except:
            self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
class lisp_group_mapping():
    def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
        self.group_name = group_name
        self.group_prefix = group_prefix
        self.use_ms_name = ms_name
        self.sources = sources
        self.rle_address = rle_addr

    def add_group(self):
        lisp_group_mapping_list[self.group_name] = self
lisp_site_flags = {
"P" : "ETR is {}Requesting Map-Server to Proxy Map-Reply" ,
"S" : "ETR is {}LISP-SEC capable" ,
"I" : "xTR-ID and site-ID are {}included in Map-Register" ,
"T" : "Use Map-Register TTL field to timeout registration is {}set" ,
"R" : "Merging registrations are {}requested" ,
"M" : "ETR is {}a LISP Mobile-Node" ,
"N" : "ETR is {}requesting Map-Notify messages from Map-Server"
}
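#
# Each letter above keys a hover-text template; lisp_site_eid.print_flags()
# prints a set flag as the upper-case letter and a clear flag as lower-case,
# and fills the template with "" or "not " accordingly.
#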
if 25 - 25: OoooooooOO . Oo0Ooo + OOooOOo + Oo0Ooo * O0 % i1IIi
class lisp_site():
    def __init__(self):
        self.site_name = ""
        self.description = ""
        self.shutdown = False
        self.auth_sha1_or_sha2 = False
        self.auth_key = {}
        self.encryption_key = None
        self.allowed_prefixes = {}
        self.allowed_prefixes_sorted = []
        self.allowed_rlocs = {}
        self.map_notifies_sent = 0
        self.map_notify_acks_received = 0
class lisp_site_eid ( ) :
def __init__ ( self , site ) :
self . site = site
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . first_registered = 0
self . last_registered = 0
self . last_registerer = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self . registered = False
self . registered_rlocs = [ ]
self . auth_sha1_or_sha2 = False
self . individual_registrations = { }
self . map_registers_received = 0
self . proxy_reply_requested = False
self . force_proxy_reply = False
self . force_nat_proxy_reply = False
self . force_ttl = None
self . pitr_proxy_reply_drop = False
self . proxy_reply_action = ""
self . lisp_sec_present = False
self . map_notify_requested = False
self . mobile_node_requested = False
self . echo_nonce_capable = False
self . use_register_ttl_requested = False
self . merge_register_requested = False
self . xtr_id_present = False
self . xtr_id = 0
self . site_id = 0
self . accept_more_specifics = False
self . parent_for_more_specifics = None
self . dynamic = False
self . more_specific_registrations = [ ]
self . source_cache = None
self . inconsistent_registration = False
self . policy = None
self . require_signature = False
if 51 - 51: II111iiii . oO0o % iII111i
if 47 - 47: II111iiii - iII111i * I1IiiI . IiII
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 41 - 41: OoOoOO00 / O0 + I1Ii111 . I1ii11iIi11i
if 48 - 48: Ii1I . o0oOOo0O0Ooo * O0 / OoooooooOO + I1Ii111 + Oo0Ooo
    def print_flags(self, html):
        if (html == False):
            output = "{}-{}-{}-{}-{}-{}-{}".format(
                "P" if self.proxy_reply_requested else "p",
                "S" if self.lisp_sec_present else "s",
                "I" if self.xtr_id_present else "i",
                "T" if self.use_register_ttl_requested else "t",
                "R" if self.merge_register_requested else "r",
                "M" if self.mobile_node_requested else "m",
                "N" if self.map_notify_requested else "n")
        else:
            flags = self.print_flags(False)
            flags = flags.split("-")
            output = ""
            for flag in flags:
                hover = lisp_site_flags[flag.upper()]
                hover = hover.format("" if flag.isupper() else "not ")
                output += lisp_span(flag, hover)
                if (flag.lower() != "n"): output += "-"
        return(output)
if 2 - 2: iII111i % I1ii11iIi11i / iII111i
if 93 - 93: iII111i
def copy_state_to_parent ( self , child ) :
self . xtr_id = child . xtr_id
self . site_id = child . site_id
self . first_registered = child . first_registered
self . last_registered = child . last_registered
self . last_registerer = child . last_registerer
self . register_ttl = child . register_ttl
if ( self . registered == False ) :
self . first_registered = lisp_get_timestamp ( )
if 5 - 5: iII111i . I11i % I11i * Ii1I - I1ii11iIi11i . i11iIiiIii
self . auth_sha1_or_sha2 = child . auth_sha1_or_sha2
self . registered = child . registered
self . proxy_reply_requested = child . proxy_reply_requested
self . lisp_sec_present = child . lisp_sec_present
self . xtr_id_present = child . xtr_id_present
self . use_register_ttl_requested = child . use_register_ttl_requested
self . merge_register_requested = child . merge_register_requested
self . mobile_node_requested = child . mobile_node_requested
self . map_notify_requested = child . map_notify_requested
if 32 - 32: II111iiii
if 58 - 58: I1IiiI - o0oOOo0O0Ooo - I1Ii111 . O0 % OoO0O00 . I11i
def build_sort_key ( self ) :
I1O0oO = lisp_cache ( )
IIiiiII , Iiii11 = I1O0oO . build_key ( self . eid )
O0O00Oo00Oo0 = ""
if ( self . group . is_null ( ) == False ) :
iiI1ii1i , O0O00Oo00Oo0 = I1O0oO . build_key ( self . group )
O0O00Oo00Oo0 = "-" + O0O00Oo00Oo0 [ 0 : 12 ] + "-" + str ( iiI1ii1i ) + "-" + O0O00Oo00Oo0 [ 12 : : ]
if 70 - 70: I11i % i1IIi . I1Ii111 / oO0o + II111iiii % OoooooooOO
Iiii11 = Iiii11 [ 0 : 12 ] + "-" + str ( IIiiiII ) + "-" + Iiii11 [ 12 : : ] + O0O00Oo00Oo0
del ( I1O0oO )
return ( Iiii11 )
if 47 - 47: II111iiii . iIii1I11I1II1
if 95 - 95: II111iiii % Oo0Ooo + I11i
def merge_in_site_eid ( self , child ) :
oOOoOIiIII = False
if ( self . group . is_null ( ) ) :
self . merge_rlocs_in_site_eid ( )
else :
oOOoOIiIII = self . merge_rles_in_site_eid ( )
if ( child != None ) :
self . copy_state_to_parent ( child )
self . map_registers_received += 1
if 7 - 7: I1ii11iIi11i % OoOoOO00 - O0 . I1Ii111
return ( oOOoOIiIII )
if 9 - 9: Ii1I . OoooooooOO / ooOoO0o + i1IIi
if 90 - 90: oO0o - OoOoOO00 % ooOoO0o
def copy_rloc_records ( self ) :
o0OOO0OO = [ ]
for O0OO0O in self . registered_rlocs :
o0OOO0OO . append ( copy . deepcopy ( O0OO0O ) )
if 85 - 85: ooOoO0o + I1ii11iIi11i / oO0o . oO0o * Ii1I
return ( o0OOO0OO )
if 84 - 84: iII111i
if 32 - 32: II111iiii % OoO0O00 / i11iIiiIii . Oo0Ooo . OoooooooOO % oO0o
def merge_rlocs_in_site_eid ( self ) :
self . registered_rlocs = [ ]
for ooO00oO0O in self . individual_registrations . values ( ) :
if ( self . site_id != ooO00oO0O . site_id ) : continue
if ( ooO00oO0O . registered == False ) : continue
self . registered_rlocs += ooO00oO0O . copy_rloc_records ( )
o0OOO0OO = [ ]
for O0OO0O in self . registered_rlocs :
if ( O0OO0O . rloc . is_null ( ) or len ( o0OOO0OO ) == 0 ) :
o0OOO0OO . append ( O0OO0O )
continue
if 22 - 22: Ii1I - II111iiii % Oo0Ooo * OoOoOO00 + iIii1I11I1II1
for iI1I1iiI1I in o0OOO0OO :
if ( iI1I1iiI1I . rloc . is_null ( ) ) : continue
if ( O0OO0O . rloc . is_exact_match ( iI1I1iiI1I . rloc ) ) : break
if 41 - 41: OoooooooOO + iIii1I11I1II1 . O0 % I1Ii111 % OOooOOo + I1Ii111
if ( iI1I1iiI1I == o0OOO0OO [ - 1 ] ) : o0OOO0OO . append ( O0OO0O )
if 65 - 65: II111iiii . oO0o
self . registered_rlocs = o0OOO0OO
if ( len ( self . registered_rlocs ) == 0 ) : self . registered = False
return
if 44 - 44: i1IIi
if 27 - 27: ooOoO0o - Oo0Ooo + i11iIiiIii - oO0o % O0
def merge_rles_in_site_eid ( self ) :
I1iii11 = { }
for O0OO0O in self . registered_rlocs :
if ( O0OO0O . rle == None ) : continue
for I1I1iiI in O0OO0O . rle . rle_nodes :
iIiIi1iI11iiI = I1I1iiI . address . print_address_no_iid ( )
I1iii11 [ iIiIi1iI11iiI ] = I1I1iiI . address
if 47 - 47: Oo0Ooo + oO0o % OoooooooOO
break
self . merge_rlocs_in_site_eid ( )
OOi1Ii1ii11I1II = [ ]
for O0OO0O in self . registered_rlocs :
if ( self . registered_rlocs . index ( O0OO0O ) == 0 ) :
OOi1Ii1ii11I1II . append ( O0OO0O )
continue
if 38 - 38: OoOoOO00 + OoooooooOO
if ( O0OO0O . rle == None ) : OOi1Ii1ii11I1II . append ( O0OO0O )
if 89 - 89: OoooooooOO % II111iiii . I1ii11iIi11i + o0oOOo0O0Ooo % I1Ii111 * IiII
self . registered_rlocs = OOi1Ii1ii11I1II
II1IIiiI1 = lisp_rle ( "" )
Oo00oooOO00o0 = { }
i1OOO = None
for ooO00oO0O in self . individual_registrations . values ( ) :
if ( ooO00oO0O . registered == False ) : continue
IIiIIi1 = ooO00oO0O . registered_rlocs [ 0 ] . rle
if ( IIiIIi1 == None ) : continue
if 53 - 53: iII111i + oO0o % O0
i1OOO = ooO00oO0O . registered_rlocs [ 0 ] . rloc_name
for ooo0O in IIiIIi1 . rle_nodes :
iIiIi1iI11iiI = ooo0O . address . print_address_no_iid ( )
if ( Oo00oooOO00o0 . has_key ( iIiIi1iI11iiI ) ) : break
if 27 - 27: iII111i - I1ii11iIi11i . I1Ii111 / OOooOOo
I1I1iiI = lisp_rle_node ( )
I1I1iiI . address . copy_address ( ooo0O . address )
I1I1iiI . level = ooo0O . level
I1I1iiI . rloc_name = i1OOO
II1IIiiI1 . rle_nodes . append ( I1I1iiI )
Oo00oooOO00o0 [ iIiIi1iI11iiI ] = ooo0O . address
if ( len ( II1IIiiI1 . rle_nodes ) == 0 ) : II1IIiiI1 = None
if ( len ( self . registered_rlocs ) != 0 ) :
self . registered_rlocs [ 0 ] . rle = II1IIiiI1
if ( i1OOO ) : self . registered_rlocs [ 0 ] . rloc_name = None
if ( I1iii11 . keys ( ) == Oo00oooOO00o0 . keys ( ) ) : return ( False )
if 43 - 43: i11iIiiIii * II111iiii + ooOoO0o - OoooooooOO * II111iiii / OoO0O00
lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) ,
# I1ii11iIi11i + I11i . iII111i * OoOoOO00 % I1ii11iIi11i / Ii1I
I1iii11 . keys ( ) , Oo00oooOO00o0 . keys ( ) ) )
if 48 - 48: I1ii11iIi11i - i1IIi
return ( True )
if 73 - 73: oO0o / iII111i * I1Ii111 + i1IIi * I1Ii111 / I1Ii111
if 75 - 75: iIii1I11I1II1 / OoO0O00 / i1IIi
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . add_cache ( self . eid , self )
else :
iIi1II1 = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iIi1II1 == None ) :
iIi1II1 = lisp_site_eid ( self . site )
iIi1II1 . eid . copy_address ( self . group )
iIi1II1 . group . copy_address ( self . group )
lisp_sites_by_eid . add_cache ( self . group , iIi1II1 )
iIi1II1 . parent_for_more_specifics = self . parent_for_more_specifics
if 87 - 87: O0 . o0oOOo0O0Ooo % OOooOOo / I11i - I1Ii111 % i11iIiiIii
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( iIi1II1 . group )
iIi1II1 . add_source_entry ( self )
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . delete_cache ( self . eid )
else :
iIi1II1 = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iIi1II1 == None ) : return
if 5 - 5: i1IIi * iII111i . o0oOOo0O0Ooo - I1ii11iIi11i
ooO00oO0O = iIi1II1 . lookup_source_cache ( self . eid , True )
if ( ooO00oO0O == None ) : return
if 84 - 84: i1IIi
if ( iIi1II1 . source_cache == None ) : return
if 17 - 17: IiII + iII111i * OoO0O00 / iII111i
iIi1II1 . source_cache . delete_cache ( self . eid )
if ( iIi1II1 . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
def add_source_entry ( self , source_se ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_se . eid , source_se )
if 58 - 58: OoO0O00 * I1IiiI - II111iiii / Ii1I - I1IiiI % OoooooooOO
if 33 - 33: IiII / i1IIi + I1Ii111
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 5 - 5: O0 / iII111i % II111iiii . Oo0Ooo - I11i
if 84 - 84: oO0o * iII111i % i11iIiiIii - O0 . iIii1I11I1II1 - OoOoOO00
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
if 73 - 73: OoOoOO00
if 66 - 66: Oo0Ooo
def eid_record_matches ( self , eid_record ) :
if ( self . eid . is_exact_match ( eid_record . eid ) == False ) : return ( False )
if ( eid_record . group . is_null ( ) ) : return ( True )
return ( eid_record . group . is_exact_match ( self . group ) )
if 42 - 42: i11iIiiIii / II111iiii . OOooOOo
if 65 - 65: OoOoOO00 % II111iiii + Oo0Ooo
def inherit_from_ams_parent ( self ) :
OOooOo00Ooo = self . parent_for_more_specifics
if ( OOooOo00Ooo == None ) : return
self . force_proxy_reply = OOooOo00Ooo . force_proxy_reply
self . force_nat_proxy_reply = OOooOo00Ooo . force_nat_proxy_reply
self . force_ttl = OOooOo00Ooo . force_ttl
self . pitr_proxy_reply_drop = OOooOo00Ooo . pitr_proxy_reply_drop
self . proxy_reply_action = OOooOo00Ooo . proxy_reply_action
self . echo_nonce_capable = OOooOo00Ooo . echo_nonce_capable
self . policy = OOooOo00Ooo . policy
self . require_signature = OOooOo00Ooo . require_signature
if 24 - 24: OoO0O00 % OoooooooOO
if 16 - 16: OoOoOO00 % Oo0Ooo * OoOoOO00 . Ii1I
def rtrs_in_rloc_set ( self ) :
for O0OO0O in self . registered_rlocs :
if ( O0OO0O . is_rtr ( ) ) : return ( True )
if 91 - 91: I1Ii111 - OoooooooOO . i1IIi . I1ii11iIi11i
return ( False )
if 37 - 37: IiII - oO0o
if 92 - 92: I1IiiI
def is_rtr_in_rloc_set ( self , rtr_rloc ) :
for O0OO0O in self . registered_rlocs :
if ( O0OO0O . rloc . is_exact_match ( rtr_rloc ) == False ) : continue
if ( O0OO0O . is_rtr ( ) ) : return ( True )
if 51 - 51: OoO0O00 + Oo0Ooo - OOooOOo + I1ii11iIi11i
return ( False )
if 32 - 32: I1ii11iIi11i % OoOoOO00 + Oo0Ooo
if 92 - 92: II111iiii . O0 . iIii1I11I1II1 % IiII - i11iIiiIii
def is_rloc_in_rloc_set ( self , rloc ) :
for O0OO0O in self . registered_rlocs :
if ( O0OO0O . rle ) :
for II1IIiiI1 in O0OO0O . rle . rle_nodes :
if ( II1IIiiI1 . address . is_exact_match ( rloc ) ) : return ( True )
if 9 - 9: OoO0O00
if 60 - 60: O0 / OoOoOO00 % i11iIiiIii % II111iiii / OoooooooOO
if ( O0OO0O . rloc . is_exact_match ( rloc ) ) : return ( True )
if 52 - 52: ooOoO0o
return ( False )
if 100 - 100: Oo0Ooo - o0oOOo0O0Ooo + iIii1I11I1II1 / ooOoO0o % iIii1I11I1II1
if 4 - 4: OoOoOO00 / Oo0Ooo - OoO0O00 . OoOoOO00 / I1Ii111
def do_rloc_sets_match ( self , prev_rloc_set ) :
if ( len ( self . registered_rlocs ) != len ( prev_rloc_set ) ) : return ( False )
if 60 - 60: OOooOOo * I1Ii111
for O0OO0O in prev_rloc_set :
OoOO0 = O0OO0O . rloc
if ( self . is_rloc_in_rloc_set ( OoOO0 ) == False ) : return ( False )
if 17 - 17: iII111i * I11i / iIii1I11I1II1 - II111iiii
return ( True )
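#
# Note on do_rloc_sets_match() (summary): two RLOC-sets are considered equal
# when they have the same length and every RLOC (or RLE node) of the previous
# set is still present in the currently registered set. This appears to be
# how RLOC-set changes between successive registrations are detected.
#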
class lisp_mr ( ) :
def __init__ ( self , addr_str , dns_name , mr_name ) :
self . mr_name = mr_name if ( mr_name != None ) else "all"
self . dns_name = dns_name
self . map_resolver = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . last_dns_resolve = None
self . a_record_index = 0
if ( addr_str ) :
self . map_resolver . store_address ( addr_str )
self . insert_mr ( )
else :
self . resolve_dns_name ( )
if 87 - 87: Ii1I / OoOoOO00 / OOooOOo
self . last_used = 0
self . last_reply = 0
self . last_nonce = 0
self . map_requests_sent = 0
self . neg_map_replies_received = 0
self . total_rtt = 0
if 11 - 11: o0oOOo0O0Ooo * OoO0O00 . o0oOOo0O0Ooo - I1IiiI / IiII - OOooOOo
if 19 - 19: i1IIi + IiII . OoO0O00 / O0 - I1Ii111 - Oo0Ooo
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
if 24 - 24: iII111i + i1IIi
try :
o00oo0OO0 = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
iII1ii1 = o00oo0OO0 [ 2 ]
except :
return
if ( len ( iII1ii1 ) <= self . a_record_index ) :
self . delete_mr ( )
return
if 75 - 75: iII111i * II111iiii - I1IiiI
if 30 - 30: i1IIi / ooOoO0o . ooOoO0o
iIiIi1iI11iiI = iII1ii1 [ self . a_record_index ]
if ( iIiIi1iI11iiI != self . map_resolver . print_address_no_iid ( ) ) :
self . delete_mr ( )
self . map_resolver . store_address ( iIiIi1iI11iiI )
self . insert_mr ( )
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
if 98 - 98: iII111i . i1IIi + o0oOOo0O0Ooo * OoooooooOO - i11iIiiIii
for iIiIi1iI11iiI in iII1ii1 [ 1 : : ] :
ii1iI1iI1 = lisp_address ( LISP_AFI_NONE , iIiIi1iI11iiI , 0 , 0 )
Ii1IIi1III1i = lisp_get_map_resolver ( ii1iI1iI1 , None )
if ( Ii1IIi1III1i != None and Ii1IIi1III1i . a_record_index == iII1ii1 . index ( iIiIi1iI11iiI ) ) :
continue
if 21 - 21: i11iIiiIii . oO0o * o0oOOo0O0Ooo + Oo0Ooo * OoOoOO00 * o0oOOo0O0Ooo
Ii1IIi1III1i = lisp_mr ( iIiIi1iI11iiI , None , None )
Ii1IIi1III1i . a_record_index = iII1ii1 . index ( iIiIi1iI11iiI )
Ii1IIi1III1i . dns_name = self . dns_name
Ii1IIi1III1i . last_dns_resolve = lisp_get_timestamp ( )
iIIii1III = [ ]
for Ii1IIi1III1i in lisp_map_resolvers_list . values ( ) :
if ( self . dns_name != Ii1IIi1III1i . dns_name ) : continue
ii1iI1iI1 = Ii1IIi1III1i . map_resolver . print_address_no_iid ( )
if ( ii1iI1iI1 in iII1ii1 ) : continue
iIIii1III . append ( Ii1IIi1III1i )
if 3 - 3: I1Ii111 % OoooooooOO / O0 * OoOoOO00 . Ii1I
for Ii1IIi1III1i in iIIii1III : Ii1IIi1III1i . delete_mr ( )
if 39 - 39: Oo0Ooo * ooOoO0o - OoOoOO00
if 48 - 48: I11i . I1IiiI
def insert_mr ( self ) :
Iiii11 = self . mr_name + self . map_resolver . print_address ( )
lisp_map_resolvers_list [ Iiii11 ] = self
if 29 - 29: ooOoO0o
if 18 - 18: I1Ii111 / O0 - II111iiii % IiII - ooOoO0o
def delete_mr ( self ) :
Iiii11 = self . mr_name + self . map_resolver . print_address ( )
if ( lisp_map_resolvers_list . has_key ( Iiii11 ) == False ) : return
lisp_map_resolvers_list . pop ( Iiii11 )
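#
# Note (summary): map-resolvers live in lisp_map_resolvers_list keyed by the
# concatenation of the mr-name ("all" by default) and the printed address; a
# key such as "all10.0.0.1" is a hypothetical example. insert_mr() and
# delete_mr() add and remove entries under that key.
#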
if 48 - 48: OOooOOo * OoOoOO00 / oO0o + II111iiii - I1ii11iIi11i
if 85 - 85: I1ii11iIi11i * OoooooooOO . OOooOOo * OOooOOo
if 13 - 13: I1IiiI / Ii1I - OoOoOO00 . i1IIi * oO0o * o0oOOo0O0Ooo
class lisp_ddt_root ( ) :
def __init__ ( self ) :
self . root_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . public_key = ""
self . priority = 0
self . weight = 0
if 5 - 5: I11i - I1Ii111 * I11i - II111iiii + OOooOOo + II111iiii
if 91 - 91: i1IIi + Oo0Ooo - I1ii11iIi11i + I1ii11iIi11i * O0 / O0
if 78 - 78: OoooooooOO
class lisp_referral ( ) :
def __init__ ( self ) :
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . referral_set = { }
self . referral_type = LISP_DDT_ACTION_NULL
self . referral_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . referral_ttl = 0
self . uptime = lisp_get_timestamp ( )
self . expires = 0
self . source_cache = None
if 8 - 8: Oo0Ooo - Oo0Ooo % O0 - Ii1I / o0oOOo0O0Ooo % Oo0Ooo
if 51 - 51: iIii1I11I1II1 / iIii1I11I1II1 * I1ii11iIi11i / I11i
def print_referral ( self , eid_indent , referral_indent ) :
I1IiiIIi = lisp_print_elapsed ( self . uptime )
oooO0oOoo0O = lisp_print_future ( self . expires )
lprint ( "{}Referral EID {}, uptime/expires {}/{}, {} referrals:" . format ( eid_indent , green ( self . eid . print_prefix ( ) , False ) , I1IiiIIi ,
# I1IiiI / iII111i / Oo0Ooo
oooO0oOoo0O , len ( self . referral_set ) ) )
if 66 - 66: I1Ii111 + OoooooooOO % I1IiiI . iII111i * Oo0Ooo + o0oOOo0O0Ooo
for IiOO00O00 in self . referral_set . values ( ) :
IiOO00O00 . print_ref_node ( referral_indent )
if 96 - 96: OoO0O00 - ooOoO0o * Ii1I
if 34 - 34: OoO0O00 . Oo0Ooo % Ii1I . IiII + OoOoOO00
if 10 - 10: OoooooooOO * iII111i * ooOoO0o . Ii1I % I1Ii111 / I1ii11iIi11i
def print_referral_type ( self ) :
if ( self . eid . afi == LISP_AFI_ULTIMATE_ROOT ) : return ( "root" )
if ( self . referral_type == LISP_DDT_ACTION_NULL ) :
return ( "null-referral" )
if 71 - 71: Ii1I + IiII
if ( self . referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND ) :
return ( "no-site-action" )
if 10 - 10: II111iiii % o0oOOo0O0Ooo . o0oOOo0O0Ooo % iII111i
if ( self . referral_type > LISP_DDT_ACTION_MAX ) :
return ( "invalid-action" )
if 2 - 2: OoooooooOO / IiII % Oo0Ooo % iIii1I11I1II1
return ( lisp_map_referral_action_string [ self . referral_type ] )
if 62 - 62: oO0o
if 47 - 47: I1IiiI - O0 - I1ii11iIi11i . OoOoOO00
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 98 - 98: o0oOOo0O0Ooo - OoO0O00 . I1ii11iIi11i / OOooOOo
if 43 - 43: I1IiiI + OOooOOo + o0oOOo0O0Ooo
def print_ttl ( self ) :
Ii1 = self . referral_ttl
if ( Ii1 < 60 ) : return ( str ( Ii1 ) + " secs" )
if 44 - 44: o0oOOo0O0Ooo % OoO0O00 . OoooooooOO
if ( ( Ii1 % 60 ) == 0 ) :
Ii1 = str ( Ii1 / 60 ) + " mins"
else :
Ii1 = str ( Ii1 ) + " secs"
if 21 - 21: Oo0Ooo * Oo0Ooo - iII111i - O0
return ( Ii1 )
if 87 - 87: OOooOOo / I1Ii111 - Ii1I + O0 - oO0o - O0
if 68 - 68: iII111i + II111iiii + I1ii11iIi11i * OOooOOo / oO0o
def is_referral_negative ( self ) :
return ( self . referral_type in ( LISP_DDT_ACTION_MS_NOT_REG , LISP_DDT_ACTION_DELEGATION_HOLE , LISP_DDT_ACTION_NOT_AUTH ) )
if 32 - 32: OoooooooOO
if 99 - 99: II111iiii % Oo0Ooo / OOooOOo / I1ii11iIi11i % O0 + i1IIi
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_referral_cache . add_cache ( self . eid , self )
else :
IiIIiIiI1II = lisp_referral_cache . lookup_cache ( self . group , True )
if ( IiIIiIiI1II == None ) :
IiIIiIiI1II = lisp_referral ( )
IiIIiIiI1II . eid . copy_address ( self . group )
IiIIiIiI1II . group . copy_address ( self . group )
lisp_referral_cache . add_cache ( self . group , IiIIiIiI1II )
if 90 - 90: OoOoOO00 % OoO0O00 . I1IiiI * oO0o
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( IiIIiIiI1II . group )
IiIIiIiI1II . add_source_entry ( self )
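#
# Note (summary): the referral cache uses the same two-level layout as the
# site cache earlier in this file. For (S,G) entries the group prefix is
# looked up (or created) in lisp_referral_cache first and the source EID is
# then stored in that group entry's source_cache; unicast referrals (null
# group) go directly into lisp_referral_cache.
#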
if 17 - 17: O0 - i1IIi
if 77 - 77: OOooOOo - i1IIi / II111iiii . I1Ii111 + O0
if 1 - 1: OoooooooOO % iIii1I11I1II1 * I1ii11iIi11i
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_referral_cache . delete_cache ( self . eid )
else :
IiIIiIiI1II = lisp_referral_cache . lookup_cache ( self . group , True )
if ( IiIIiIiI1II == None ) : return
if 17 - 17: Ii1I * i1IIi % OoO0O00
ooo0oOooOO0o0 = IiIIiIiI1II . lookup_source_cache ( self . eid , True )
if ( ooo0oOooOO0o0 == None ) : return
if 12 - 12: I1ii11iIi11i
IiIIiIiI1II . source_cache . delete_cache ( self . eid )
if ( IiIIiIiI1II . source_cache . cache_size ( ) == 0 ) :
lisp_referral_cache . delete_cache ( self . group )
if 86 - 86: iIii1I11I1II1 % iII111i
if 80 - 80: Oo0Ooo
if 37 - 37: i11iIiiIii - I1Ii111
if 50 - 50: I1IiiI / Ii1I / Ii1I + O0 % I11i - i1IIi
def add_source_entry ( self , source_ref ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_ref . eid , source_ref )
if 72 - 72: II111iiii . OoO0O00 . II111iiii * I1ii11iIi11i
if 42 - 42: II111iiii
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 45 - 45: I1ii11iIi11i . I1Ii111 . i1IIi * OOooOOo
if 53 - 53: Ii1I . i11iIiiIii + o0oOOo0O0Ooo % I11i - I1ii11iIi11i * I1ii11iIi11i
if 87 - 87: I1Ii111 % i11iIiiIii + O0
class lisp_referral_node ( ) :
def __init__ ( self ) :
self . referral_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . priority = 0
self . weight = 0
self . updown = True
self . map_requests_sent = 0
self . no_responses = 0
self . uptime = lisp_get_timestamp ( )
if 67 - 67: OoooooooOO / i1IIi / ooOoO0o . i1IIi - i11iIiiIii . i1IIi
if 41 - 41: i11iIiiIii / ooOoO0o - Ii1I + I11i
def print_ref_node ( self , indent ) :
OOOO0O00o = lisp_print_elapsed ( self . uptime )
lprint ( "{}referral {}, uptime {}, {}, priority/weight: {}/{}" . format ( indent , red ( self . referral_address . print_address ( ) , False ) , OOOO0O00o ,
# i11iIiiIii
"up" if self . updown else "down" , self . priority , self . weight ) )
if 59 - 59: o0oOOo0O0Ooo % iIii1I11I1II1
if 55 - 55: i11iIiiIii / OoOoOO00
if 31 - 31: i1IIi - I1IiiI . I1IiiI * Ii1I
class lisp_ms ( ) :
def __init__ ( self , addr_str , dns_name , ms_name , alg_id , key_id , pw , pr ,
mr , rr , wmn , site_id , ekey_id , ekey ) :
self . ms_name = ms_name if ( ms_name != None ) else "all"
self . dns_name = dns_name
self . map_server = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . last_dns_resolve = None
self . a_record_index = 0
if ( lisp_map_servers_list == { } ) :
self . xtr_id = lisp_get_control_nonce ( )
else :
self . xtr_id = lisp_map_servers_list . values ( ) [ 0 ] . xtr_id
if 80 - 80: OoOoOO00
self . alg_id = alg_id
self . key_id = key_id
self . password = pw
self . proxy_reply = pr
self . merge_registrations = mr
self . refresh_registrations = rr
self . want_map_notify = wmn
self . site_id = site_id
self . map_registers_sent = 0
self . map_registers_multicast_sent = 0
self . map_notifies_received = 0
self . map_notify_acks_sent = 0
self . ekey_id = ekey_id
self . ekey = ekey
if ( addr_str ) :
self . map_server . store_address ( addr_str )
self . insert_ms ( )
else :
self . resolve_dns_name ( )
if 36 - 36: I11i - ooOoO0o - ooOoO0o . I1ii11iIi11i / II111iiii % OOooOOo
if 26 - 26: OoooooooOO / ooOoO0o - iII111i / OoO0O00 . O0 * OOooOOo
if 85 - 85: iIii1I11I1II1 + iII111i + iII111i - ooOoO0o * OoO0O00
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
if 80 - 80: i11iIiiIii / OOooOOo . OoooooooOO % I11i - iII111i * iIii1I11I1II1
try :
o00oo0OO0 = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
iII1ii1 = o00oo0OO0 [ 2 ]
except :
return
if ( len ( iII1ii1 ) <= self . a_record_index ) :
self . delete_ms ( )
return
if 44 - 44: iIii1I11I1II1 + Oo0Ooo - I1Ii111 . OoooooooOO
if 28 - 28: Ii1I + OOooOOo % IiII . i11iIiiIii - I1IiiI * Oo0Ooo
iIiIi1iI11iiI = iII1ii1 [ self . a_record_index ]
if ( iIiIi1iI11iiI != self . map_server . print_address_no_iid ( ) ) :
self . delete_ms ( )
self . map_server . store_address ( iIiIi1iI11iiI )
self . insert_ms ( )
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
if 17 - 17: i1IIi
for iIiIi1iI11iiI in iII1ii1 [ 1 : : ] :
ii1iI1iI1 = lisp_address ( LISP_AFI_NONE , iIiIi1iI11iiI , 0 , 0 )
ooooOOoO = lisp_get_map_server ( ii1iI1iI1 )
if ( ooooOOoO != None and ooooOOoO . a_record_index == iII1ii1 . index ( iIiIi1iI11iiI ) ) :
continue
if 29 - 29: OOooOOo % OoO0O00 + oO0o + o0oOOo0O0Ooo . iII111i
ooooOOoO = copy . deepcopy ( self )
ooooOOoO . map_server . store_address ( iIiIi1iI11iiI )
ooooOOoO . a_record_index = iII1ii1 . index ( iIiIi1iI11iiI )
ooooOOoO . last_dns_resolve = lisp_get_timestamp ( )
ooooOOoO . insert_ms ( )
iIIii1III = [ ]
for ooooOOoO in lisp_map_servers_list . values ( ) :
if ( self . dns_name != ooooOOoO . dns_name ) : continue
ii1iI1iI1 = ooooOOoO . map_server . print_address_no_iid ( )
if ( ii1iI1iI1 in iII1ii1 ) : continue
iIIii1III . append ( ooooOOoO )
if 6 - 6: oO0o . OoO0O00 - II111iiii . I1IiiI - o0oOOo0O0Ooo - i1IIi
for ooooOOoO in iIIii1III : ooooOOoO . delete_ms ( )
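#
# Note (summary): resolve_dns_name() re-resolves the map-server's DNS name at
# most every 30 seconds. If the A record at this entry's a_record_index
# changed, the entry is re-inserted under the new address. For a LISP-Decent
# DNS suffix, the first entry also deep-copies itself once per additional A
# record and prunes lisp_ms entries whose addresses no longer appear in the
# DNS answer.
#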
if 42 - 42: Ii1I + i11iIiiIii
if 46 - 46: O0 % OoOoOO00 - I1Ii111 . I1IiiI
def insert_ms ( self ) :
Iiii11 = self . ms_name + self . map_server . print_address ( )
lisp_map_servers_list [ Iiii11 ] = self
if 66 - 66: II111iiii * iIii1I11I1II1 * ooOoO0o * I11i . II111iiii - ooOoO0o
if 15 - 15: I1ii11iIi11i - i11iIiiIii - Ii1I / Ii1I . iII111i
def delete_ms ( self ) :
Iiii11 = self . ms_name + self . map_server . print_address ( )
if ( lisp_map_servers_list . has_key ( Iiii11 ) == False ) : return
lisp_map_servers_list . pop ( Iiii11 )
if 36 - 36: oO0o + Oo0Ooo * I1Ii111 % OOooOOo . Oo0Ooo . I1IiiI
if 81 - 81: o0oOOo0O0Ooo . OoOoOO00 . i11iIiiIii
if 13 - 13: i1IIi
class lisp_interface ( ) :
def __init__ ( self , device ) :
self . interface_name = ""
self . device = device
self . instance_id = None
self . bridge_socket = None
self . raw_socket = None
self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dynamic_eid_device = None
self . dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self . multi_tenant_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 70 - 70: O0 / II111iiii
if 98 - 98: OoOoOO00 - O0 . O0 + ooOoO0o * iIii1I11I1II1
def add_interface ( self ) :
lisp_myinterfaces [ self . device ] = self
if 7 - 7: IiII * OoOoOO00 + iIii1I11I1II1 / OoOoOO00 + Oo0Ooo / o0oOOo0O0Ooo
if 77 - 77: i1IIi . I1IiiI
def get_instance_id ( self ) :
return ( self . instance_id )
if 59 - 59: O0 + OoooooooOO - i1IIi
if 87 - 87: IiII * OoooooooOO / Oo0Ooo % iIii1I11I1II1 % oO0o
def get_socket ( self ) :
return ( self . raw_socket )
if 97 - 97: ooOoO0o % i1IIi . IiII / Oo0Ooo . I1Ii111 . OoO0O00
if 12 - 12: I1IiiI
def get_bridge_socket ( self ) :
return ( self . bridge_socket )
if 99 - 99: II111iiii - OoOoOO00
if 22 - 22: i11iIiiIii * II111iiii
def does_dynamic_eid_match ( self , eid ) :
if ( self . dynamic_eid . is_null ( ) ) : return ( False )
return ( eid . is_more_specific ( self . dynamic_eid ) )
if 11 - 11: Oo0Ooo % i1IIi
if 70 - 70: II111iiii * Oo0Ooo * OOooOOo - I1IiiI + iIii1I11I1II1 + ooOoO0o
def set_socket ( self , device ) :
o00oOOO = socket . socket ( socket . AF_INET , socket . SOCK_RAW , socket . IPPROTO_RAW )
o00oOOO . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
try :
o00oOOO . setsockopt ( socket . SOL_SOCKET , socket . SO_BINDTODEVICE , device )
except :
o00oOOO . close ( )
o00oOOO = None
if 27 - 27: I1ii11iIi11i - I1Ii111 * O0 % ooOoO0o / I1IiiI
self . raw_socket = o00oOOO
if 53 - 53: i11iIiiIii * i11iIiiIii % O0 % IiII
if 57 - 57: I1IiiI % i1IIi * OoO0O00 + I1Ii111 . I11i % I11i
def set_bridge_socket ( self , device ) :
o00oOOO = socket . socket ( socket . PF_PACKET , socket . SOCK_RAW )
try :
# socket.bind() returns None, so bind in place and keep the socket object.
o00oOOO . bind ( ( device , 0 ) )
self . bridge_socket = o00oOOO
except :
return
if 69 - 69: I1ii11iIi11i / OoOoOO00 + iIii1I11I1II1
if 8 - 8: OoooooooOO
if 72 - 72: OoooooooOO % I1ii11iIi11i - OoO0O00 . OoooooooOO
if 83 - 83: o0oOOo0O0Ooo * Ii1I - Oo0Ooo * iII111i - i11iIiiIii
class lisp_datetime ( ) :
def __init__ ( self , datetime_str ) :
self . datetime_name = datetime_str
self . datetime = None
self . parse_datetime ( )
if 6 - 6: I1IiiI + i11iIiiIii + O0 / i1IIi
if 50 - 50: iII111i . II111iiii % I1Ii111 % I1IiiI / o0oOOo0O0Ooo . I1IiiI
def valid_datetime ( self ) :
oO00OOoOOoO = self . datetime_name
if ( oO00OOoOOoO . find ( ":" ) == - 1 ) : return ( False )
if ( oO00OOoOOoO . find ( "-" ) == - 1 ) : return ( False )
O0OOo00 , I1II1II1IiI , o0oOo0o0 , time = oO00OOoOOoO [ 0 : 4 ] , oO00OOoOOoO [ 5 : 7 ] , oO00OOoOOoO [ 8 : 10 ] , oO00OOoOOoO [ 11 : : ]
if 90 - 90: I11i . O0 + oO0o
if ( ( O0OOo00 + I1II1II1IiI + o0oOo0o0 ) . isdigit ( ) == False ) : return ( False )
if ( I1II1II1IiI < "01" and I1II1II1IiI > "12" ) : return ( False )
if ( o0oOo0o0 < "01" and o0oOo0o0 > "31" ) : return ( False )
if 63 - 63: I11i . I1IiiI + OoooooooOO + O0
Oo00O0O0oOOO , I1I11iIi , Oo000 = time . split ( ":" )
if 87 - 87: OoooooooOO + OOooOOo - I1IiiI + I1Ii111
if ( ( Oo00O0O0oOOO + I1I11iIi + Oo000 ) . isdigit ( ) == False ) : return ( False )
if ( Oo00O0O0oOOO < "00" and Oo00O0O0oOOO > "23" ) : return ( False )
if ( I1I11iIi < "00" and I1I11iIi > "59" ) : return ( False )
if ( Oo000 < "00" and Oo000 > "59" ) : return ( False )
return ( True )
if 92 - 92: ooOoO0o * I11i % iIii1I11I1II1 + Ii1I - OoOoOO00
if 31 - 31: OoooooooOO
def parse_datetime ( self ) :
Ooo0o0o0o = self . datetime_name
Ooo0o0o0o = Ooo0o0o0o . replace ( "-" , "" )
Ooo0o0o0o = Ooo0o0o0o . replace ( ":" , "" )
self . datetime = int ( Ooo0o0o0o )
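#
# Note (summary): lisp_datetime strings have the form "YYYY-MM-DD-HH:MM:SS".
# parse_datetime() strips the "-" and ":" separators, so a hypothetical input
# of "2020-06-01-12:30:00" is stored as the integer 20200601123000, which
# turns the range and future/past tests below into simple integer compares.
#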
if 86 - 86: i1IIi . oO0o % OOooOOo
if 99 - 99: oO0o / I1Ii111 * oO0o * I11i
def now ( self ) :
OOOO0O00o = datetime . datetime . now ( ) . strftime ( "%Y-%m-%d-%H:%M:%S" )
OOOO0O00o = lisp_datetime ( OOOO0O00o )
return ( OOOO0O00o )
if 38 - 38: o0oOOo0O0Ooo + OoOoOO00
if 24 - 24: Ii1I - OOooOOo - o0oOOo0O0Ooo - I1Ii111 / OoooooooOO
def print_datetime ( self ) :
return ( self . datetime_name )
if 17 - 17: OoO0O00
if 79 - 79: Ii1I - II111iiii
def future ( self ) :
return ( self . datetime > self . now ( ) . datetime )
if 57 - 57: II111iiii / OoooooooOO
if 4 - 4: I11i * OoOoOO00
def past ( self ) :
return ( self . future ( ) == False )
if 18 - 18: iIii1I11I1II1 % OOooOOo - I1ii11iIi11i * i1IIi + Oo0Ooo
if 87 - 87: oO0o . I11i
def now_in_range ( self , upper ) :
return ( self . past ( ) and upper . future ( ) )
if 15 - 15: oO0o
if 45 - 45: Oo0Ooo * IiII * OoO0O00 + iIii1I11I1II1
def this_year ( self ) :
O0oO0oOOO0oO = str ( self . now ( ) . datetime ) [ 0 : 4 ]
OOOO0O00o = str ( self . datetime ) [ 0 : 4 ]
return ( OOOO0O00o == O0oO0oOOO0oO )
if 22 - 22: o0oOOo0O0Ooo * O0 % Oo0Ooo
if 52 - 52: I1IiiI % I1Ii111 - i1IIi . o0oOOo0O0Ooo % I1ii11iIi11i
def this_month ( self ) :
O0oO0oOOO0oO = str ( self . now ( ) . datetime ) [ 0 : 6 ]
OOOO0O00o = str ( self . datetime ) [ 0 : 6 ]
return ( OOOO0O00o == O0oO0oOOO0oO )
if 34 - 34: o0oOOo0O0Ooo / OoOoOO00
if 74 - 74: IiII + i1IIi . II111iiii
def today ( self ) :
O0oO0oOOO0oO = str ( self . now ( ) . datetime ) [ 0 : 8 ]
OOOO0O00o = str ( self . datetime ) [ 0 : 8 ]
return ( OOOO0O00o == O0oO0oOOO0oO )
if 1 - 1: Ii1I - o0oOOo0O0Ooo / i11iIiiIii
if 24 - 24: O0
if 59 - 59: OoO0O00 % iII111i + oO0o * II111iiii . OOooOOo
if 26 - 26: OOooOOo % OoooooooOO . Ii1I / iIii1I11I1II1 * I1IiiI
if 85 - 85: IiII / Ii1I - I1ii11iIi11i * OOooOOo
if 19 - 19: I1ii11iIi11i
class lisp_policy_match ( ) :
def __init__ ( self ) :
self . source_eid = None
self . dest_eid = None
self . source_rloc = None
self . dest_rloc = None
self . rloc_record_name = None
self . geo_name = None
self . elp_name = None
self . rle_name = None
self . json_name = None
self . datetime_lower = None
self . datetime_upper = None
if 12 - 12: ooOoO0o * I1ii11iIi11i * O0 / oO0o + iII111i - iIii1I11I1II1
if 81 - 81: Ii1I
class lisp_policy ( ) :
def __init__ ( self , policy_name ) :
self . policy_name = policy_name
self . match_clauses = [ ]
self . set_action = None
self . set_record_ttl = None
self . set_source_eid = None
self . set_dest_eid = None
self . set_rloc_address = None
self . set_rloc_record_name = None
self . set_geo_name = None
self . set_elp_name = None
self . set_rle_name = None
self . set_json_name = None
if 87 - 87: O0 % iII111i
if 57 - 57: Ii1I
def match_policy_map_request ( self , mr , srloc ) :
for i1ii1I11iIII in self . match_clauses :
i111 = i1ii1I11iIII . source_eid
O00o00oOOo = mr . source_eid
if ( i111 and O00o00oOOo and O00o00oOOo . is_more_specific ( i111 ) == False ) : continue
if 49 - 49: I11i
i111 = i1ii1I11iIII . dest_eid
O00o00oOOo = mr . target_eid
if ( i111 and O00o00oOOo and O00o00oOOo . is_more_specific ( i111 ) == False ) : continue
if 22 - 22: Oo0Ooo % OOooOOo + O0 - OoO0O00 % I11i * O0
i111 = i1ii1I11iIII . source_rloc
O00o00oOOo = srloc
if ( i111 and O00o00oOOo and O00o00oOOo . is_more_specific ( i111 ) == False ) : continue
II1Ooo0000o00OO = i1ii1I11iIII . datetime_lower
iIiooooOooOO0 = i1ii1I11iIII . datetime_upper
if ( II1Ooo0000o00OO and iIiooooOooOO0 and II1Ooo0000o00OO . now_in_range ( iIiooooOooOO0 ) == False ) : continue
return ( True )
if 20 - 20: I11i + IiII
return ( False )
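#
# Note (summary): a Map-Request matches this policy when at least one match
# clause accepts it; within a clause every configured criterion (source-EID,
# dest-EID, source-RLOC, datetime range) must match, the EID/RLOC tests being
# "is more specific than the configured prefix" checks.
#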
if 44 - 44: OoooooooOO % I11i / O0
if 94 - 94: IiII
def set_policy_map_reply ( self ) :
oOo0O0II111II = ( self . set_rloc_address == None and
self . set_rloc_record_name == None and self . set_geo_name == None and
self . set_elp_name == None and self . set_rle_name == None )
if ( oOo0O0II111II ) : return ( None )
if 33 - 33: I1Ii111
Oo0o0o0oo = lisp_rloc ( )
if ( self . set_rloc_address ) :
Oo0o0o0oo . rloc . copy_address ( self . set_rloc_address )
iIiIi1iI11iiI = Oo0o0o0oo . rloc . print_address_no_iid ( )
lprint ( "Policy set-rloc-address to {}" . format ( iIiIi1iI11iiI ) )
if 97 - 97: Ii1I / iII111i - ooOoO0o + IiII * OoOoOO00 - OOooOOo
if ( self . set_rloc_record_name ) :
Oo0o0o0oo . rloc_name = self . set_rloc_record_name
i1i1Ii = blue ( Oo0o0o0oo . rloc_name , False )
lprint ( "Policy set-rloc-record-name to {}" . format ( i1i1Ii ) )
if 43 - 43: oO0o / II111iiii - iII111i / oO0o
if ( self . set_geo_name ) :
Oo0o0o0oo . geo_name = self . set_geo_name
i1i1Ii = Oo0o0o0oo . geo_name
oO0oII11i = "" if lisp_geo_list . has_key ( i1i1Ii ) else "(not configured)"
if 76 - 76: iII111i
lprint ( "Policy set-geo-name '{}' {}" . format ( i1i1Ii , oO0oII11i ) )
if 48 - 48: OOooOOo % I1Ii111 % ooOoO0o . I1ii11iIi11i * O0 . O0
if ( self . set_elp_name ) :
Oo0o0o0oo . elp_name = self . set_elp_name
i1i1Ii = Oo0o0o0oo . elp_name
oO0oII11i = "" if lisp_elp_list . has_key ( i1i1Ii ) else "(not configured)"
if 25 - 25: O0 - Ii1I - IiII
lprint ( "Policy set-elp-name '{}' {}" . format ( i1i1Ii , oO0oII11i ) )
if 72 - 72: Ii1I % O0 + II111iiii . i11iIiiIii
if ( self . set_rle_name ) :
Oo0o0o0oo . rle_name = self . set_rle_name
i1i1Ii = Oo0o0o0oo . rle_name
oO0oII11i = "" if lisp_rle_list . has_key ( i1i1Ii ) else "(not configured)"
if 66 - 66: II111iiii % I1IiiI
lprint ( "Policy set-rle-name '{}' {}" . format ( i1i1Ii , oO0oII11i ) )
if 88 - 88: iIii1I11I1II1 * iIii1I11I1II1 + I1Ii111 * OOooOOo . I1IiiI
if ( self . set_json_name ) :
Oo0o0o0oo . json_name = self . set_json_name
i1i1Ii = Oo0o0o0oo . json_name
oO0oII11i = "" if lisp_json_list . has_key ( i1i1Ii ) else "(not configured)"
if 96 - 96: I1ii11iIi11i
lprint ( "Policy set-json-name '{}' {}" . format ( i1i1Ii , oO0oII11i ) )
if 37 - 37: OoO0O00 % o0oOOo0O0Ooo * O0 * O0 + iII111i
return ( Oo0o0o0oo )
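#
# Note (summary): set_policy_map_reply() builds a single lisp_rloc from the
# policy's set-rloc-address, set-rloc-record-name, set-geo-name, set-elp-name,
# set-rle-name and set-json-name actions and returns it; it returns None when
# none of the first five (the RLOC-affecting ones) are configured.
#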
if 18 - 18: i11iIiiIii . o0oOOo0O0Ooo - OOooOOo % oO0o * Ii1I / I1IiiI
if 46 - 46: o0oOOo0O0Ooo . ooOoO0o / Ii1I
def save_policy ( self ) :
lisp_policies [ self . policy_name ] = self
if 97 - 97: Ii1I . Oo0Ooo - O0 - I1Ii111 . i1IIi
if 47 - 47: IiII * ooOoO0o - i1IIi % OoOoOO00 * i11iIiiIii . OoooooooOO
if 84 - 84: OoOoOO00 / IiII - i1IIi - I1IiiI * OOooOOo
class lisp_pubsub ( ) :
def __init__ ( self , itr , port , nonce , ttl , xtr_id ) :
self . itr = itr
self . port = port
self . nonce = nonce
self . uptime = lisp_get_timestamp ( )
self . ttl = ttl
self . xtr_id = xtr_id
self . map_notify_count = 0
if 35 - 35: II111iiii
if 28 - 28: I1Ii111 + IiII + I1ii11iIi11i . Ii1I
def add ( self , eid_prefix ) :
Ii1 = self . ttl
Oo00o = eid_prefix . print_prefix ( )
if ( lisp_pubsub_cache . has_key ( Oo00o ) == False ) :
lisp_pubsub_cache [ Oo00o ] = { }
if 82 - 82: ooOoO0o - ooOoO0o . Ii1I . i11iIiiIii % Ii1I + OOooOOo
I1i11 = lisp_pubsub_cache [ Oo00o ]
if 33 - 33: Oo0Ooo - OOooOOo / OoOoOO00 % II111iiii % OOooOOo + I1Ii111
I1iIiI = "Add"
if ( I1i11 . has_key ( self . xtr_id ) ) :
I1iIiI = "Replace"
del ( I1i11 [ self . xtr_id ] )
if 4 - 4: i11iIiiIii + OoOoOO00 - Ii1I * i1IIi * i11iIiiIii
I1i11 [ self . xtr_id ] = self
if 46 - 46: IiII . iII111i % OoooooooOO % IiII + Ii1I - OoooooooOO
Oo00o = green ( Oo00o , False )
OooOoOOo0 = red ( self . itr . print_address_no_iid ( ) , False )
Oo0O0 = "0x" + lisp_hex_string ( self . xtr_id )
lprint ( "{} pubsub state {} for {}, xtr-id: {}, ttl {}" . format ( I1iIiI , Oo00o ,
OooOoOOo0 , Oo0O0 , Ii1 ) )
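#
# Note (summary): pubsub state is kept in lisp_pubsub_cache, a dictionary
# keyed by the printed EID-prefix whose value is a second dictionary keyed by
# xtr-id. add() replaces any existing entry for the same xtr-id (logged as
# "Replace"); delete() below removes it.
#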
if 23 - 23: O0 - iII111i
if 18 - 18: II111iiii % i11iIiiIii + I11i - OOooOOo
def delete ( self , eid_prefix ) :
Oo00o = eid_prefix . print_prefix ( )
OooOoOOo0 = red ( self . itr . print_address_no_iid ( ) , False )
Oo0O0 = "0x" + lisp_hex_string ( self . xtr_id )
if ( lisp_pubsub_cache . has_key ( Oo00o ) ) :
I1i11 = lisp_pubsub_cache [ Oo00o ]
if ( I1i11 . has_key ( self . xtr_id ) ) :
I1i11 . pop ( self . xtr_id )
lprint ( "Remove pubsub state {} for {}, xtr-id: {}" . format ( Oo00o ,
OooOoOOo0 , Oo0O0 ) )
class lisp_trace ( ) :
def __init__ ( self ) :
self . nonce = lisp_get_control_nonce ( )
self . packet_json = [ ]
self . local_rloc = None
self . local_port = None
self . lisp_socket = None
if 76 - 76: OoooooooOO - O0
if 17 - 17: Oo0Ooo % I1Ii111 . oO0o - O0
def print_trace ( self ) :
iiiIIi1Iii = self . packet_json
lprint ( "LISP-Trace JSON: '{}'" . format ( iiiIIi1Iii ) )
if 39 - 39: iII111i - I1ii11iIi11i % ooOoO0o - OoOoOO00 + OoOoOO00
if 97 - 97: I11i * I1Ii111 * oO0o
def encode ( self ) :
O0oooOO = socket . htonl ( 0x90000000 )
oOo = struct . pack ( "II" , O0oooOO , 0 )
oOo += struct . pack ( "Q" , self . nonce )
oOo += json . dumps ( self . packet_json )
return ( oOo )
if 3 - 3: iIii1I11I1II1 / ooOoO0o + ooOoO0o + I11i
if 20 - 20: OOooOOo - i1IIi / i11iIiiIii
def decode ( self , packet ) :
IIiI1I11ii1i = "I"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( False )
O0oooOO = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
O0oooOO = socket . ntohl ( O0oooOO )
if ( ( O0oooOO & 0xff000000 ) != 0x90000000 ) : return ( False )
if 60 - 60: I11i * I11i + Oo0Ooo . IiII / iII111i % OoooooooOO
if ( len ( packet ) < i1II1i1iiI1 ) : return ( False )
iIiIi1iI11iiI = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
if 35 - 35: O0 . Oo0Ooo / Oo0Ooo / Ii1I / i1IIi * I11i
iIiIi1iI11iiI = socket . ntohl ( iIiIi1iI11iiI )
oo00oo = iIiIi1iI11iiI >> 24
oo0ooo000OO0o = ( iIiIi1iI11iiI >> 16 ) & 0xff
ooOo0OOo = ( iIiIi1iI11iiI >> 8 ) & 0xff
O00OO0ooo = iIiIi1iI11iiI & 0xff
self . local_rloc = "{}.{}.{}.{}" . format ( oo00oo , oo0ooo000OO0o , ooOo0OOo , O00OO0ooo )
self . local_port = str ( O0oooOO & 0xffff )
if 22 - 22: OOooOOo
IIiI1I11ii1i = "Q"
i1II1i1iiI1 = struct . calcsize ( IIiI1I11ii1i )
if ( len ( packet ) < i1II1i1iiI1 ) : return ( False )
self . nonce = struct . unpack ( IIiI1I11ii1i , packet [ : i1II1i1iiI1 ] ) [ 0 ]
packet = packet [ i1II1i1iiI1 : : ]
if ( len ( packet ) == 0 ) : return ( True )
if 7 - 7: O0 - I1ii11iIi11i - OoO0O00 * I1Ii111
try :
self . packet_json = json . loads ( packet )
except :
return ( False )
if 17 - 17: o0oOOo0O0Ooo % OoO0O00 - I11i * o0oOOo0O0Ooo - i1IIi / I1IiiI
return ( True )
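#
# Note (summary, inferred from encode()/decode() above): a LISP-Trace message
# carries a 4-byte first word whose top byte is 0x90 and whose low 16 bits
# hold the local port, a 4-byte IPv4 local-RLOC field, an 8-byte nonce, and
# an optional JSON array describing the path; encode() appears to leave the
# port and RLOC fields zero for later stages to fill in.
#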
if 100 - 100: OoO0O00 * i1IIi * o0oOOo0O0Ooo * Oo0Ooo - o0oOOo0O0Ooo
if 100 - 100: iII111i - i11iIiiIii + OoO0O00
def myeid ( self , eid ) :
return ( lisp_is_myeid ( eid ) )
if 50 - 50: II111iiii
if 42 - 42: OOooOOo * I1Ii111
def return_to_sender ( self , lisp_socket , rts_rloc , packet ) :
Oo0o0o0oo , Iiiii = self . rtr_cache_nat_trace_find ( rts_rloc )
if ( Oo0o0o0oo == None ) :
Oo0o0o0oo , Iiiii = rts_rloc . split ( ":" )
Iiiii = int ( Iiiii )
lprint ( "Send LISP-Trace to address {}:{}" . format ( Oo0o0o0oo , Iiiii ) )
else :
lprint ( "Send LISP-Trace to translated address {}:{}" . format ( Oo0o0o0oo ,
Iiiii ) )
if 53 - 53: II111iiii % OOooOOo / I1ii11iIi11i * OoOoOO00 % I1ii11iIi11i * iII111i
if 91 - 91: iII111i . OoooooooOO
if ( lisp_socket == None ) :
o00oOOO = socket . socket ( socket . AF_INET , socket . SOCK_DGRAM )
o00oOOO . bind ( ( "0.0.0.0" , LISP_TRACE_PORT ) )
o00oOOO . sendto ( packet , ( Oo0o0o0oo , Iiiii ) )
o00oOOO . close ( )
else :
lisp_socket . sendto ( packet , ( Oo0o0o0oo , Iiiii ) )
if 90 - 90: i11iIiiIii - I1IiiI
if 39 - 39: iII111i % OoooooooOO % Ii1I % I1IiiI
if 63 - 63: OoO0O00 - I1Ii111 - II111iiii
def packet_length ( self ) :
OOOOo00oo00O = 8 ; OoOooO00 = 4 + 4 + 8
return ( OOOOo00oo00O + OoOooO00 + len ( json . dumps ( self . packet_json ) ) )
if 66 - 66: i1IIi + I1IiiI
if 45 - 45: I1Ii111 . iII111i + OoO0O00 - O0
def rtr_cache_nat_trace ( self , translated_rloc , translated_port ) :
Iiii11 = self . local_rloc + ":" + self . local_port
ooOo0O0O0oOO0 = ( translated_rloc , translated_port )
lisp_rtr_nat_trace_cache [ Iiii11 ] = ooOo0O0O0oOO0
lprint ( "Cache NAT Trace addresses {} -> {}" . format ( Iiii11 , ooOo0O0O0oOO0 ) )
if 71 - 71: Oo0Ooo + OOooOOo
if 94 - 94: OOooOOo
def rtr_cache_nat_trace_find ( self , local_rloc_and_port ) :
Iiii11 = local_rloc_and_port
try : ooOo0O0O0oOO0 = lisp_rtr_nat_trace_cache [ Iiii11 ]
except : ooOo0O0O0oOO0 = ( None , None )
return ( ooOo0O0O0oOO0 )
def lisp_get_map_server ( address ) :
for ooooOOoO in lisp_map_servers_list . values ( ) :
if ( ooooOOoO . map_server . is_exact_match ( address ) ) : return ( ooooOOoO )
if 39 - 39: O0 / ooOoO0o + I11i - OoOoOO00 * o0oOOo0O0Ooo - OoO0O00
return ( None )
def lisp_get_any_map_server ( ) :
for ooooOOoO in lisp_map_servers_list . values ( ) : return ( ooooOOoO )
return ( None )
def lisp_get_map_resolver ( address , eid ) :
if ( address != None ) :
iIiIi1iI11iiI = address . print_address ( )
Ii1IIi1III1i = None
for Iiii11 in lisp_map_resolvers_list :
if ( Iiii11 . find ( iIiIi1iI11iiI ) == - 1 ) : continue
Ii1IIi1III1i = lisp_map_resolvers_list [ Iiii11 ]
if 2 - 2: o0oOOo0O0Ooo % ooOoO0o / O0 / i11iIiiIii
return ( Ii1IIi1III1i )
if ( eid == "" ) :
O0o0Ooo0O0OO = ""
elif ( eid == None ) :
O0o0Ooo0O0OO = "all"
else :
iIiIIi1i = lisp_db_for_lookups . lookup_cache ( eid , False )
O0o0Ooo0O0OO = "all" if iIiIIi1i == None else iIiIIi1i . use_mr_name
if 54 - 54: Ii1I % OoO0O00 % I1IiiI % OOooOOo / oO0o + I1IiiI
if 94 - 94: OoOoOO00 . O0
OOoOoooOoO = None
for Ii1IIi1III1i in lisp_map_resolvers_list . values ( ) :
if ( O0o0Ooo0O0OO == "" ) : return ( Ii1IIi1III1i )
if ( Ii1IIi1III1i . mr_name != O0o0Ooo0O0OO ) : continue
if ( OOoOoooOoO == None or Ii1IIi1III1i . last_used < OOoOoooOoO . last_used ) : OOoOoooOoO = Ii1IIi1III1i
if 100 - 100: Ii1I
return ( OOoOoooOoO )
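#
# Note (summary): with an explicit address, lisp_get_map_resolver() returns
# the matching entry. Otherwise it filters the configured map-resolvers by
# mr-name ("all", or the use_mr_name of the database-mapping covering the
# source EID) and returns the least recently used one, spreading Map-Requests
# across resolvers.
#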
def lisp_get_decent_map_resolver ( eid ) :
oo0OOo0O = lisp_get_decent_index ( eid )
Ooooo000 = str ( oo0OOo0O ) + "." + lisp_decent_dns_suffix
if 13 - 13: iIii1I11I1II1 - I1IiiI % o0oOOo0O0Ooo * iIii1I11I1II1
lprint ( "Use LISP-Decent map-resolver {} for EID {}" . format ( bold ( Ooooo000 , False ) , eid . print_prefix ( ) ) )
if 99 - 99: OoooooooOO / II111iiii . I1Ii111
if 62 - 62: OOooOOo . iII111i . I1ii11iIi11i
OOoOoooOoO = None
for Ii1IIi1III1i in lisp_map_resolvers_list . values ( ) :
if ( Ooooo000 != Ii1IIi1III1i . dns_name ) : continue
if ( OOoOoooOoO == None or Ii1IIi1III1i . last_used < OOoOoooOoO . last_used ) : OOoOoooOoO = Ii1IIi1III1i
if 23 - 23: O0
return ( OOoOoooOoO )
def lisp_ipv4_input ( packet ) :
I11i11I = struct . unpack ( "H" , packet [ 10 : 12 ] ) [ 0 ]
if ( I11i11I == 0 ) :
dprint ( "Packet arrived with checksum of 0!" )
else :
packet = lisp_ip_checksum ( packet )
I11i11I = struct . unpack ( "H" , packet [ 10 : 12 ] ) [ 0 ]
if ( I11i11I != 0 ) :
dprint ( "IPv4 header checksum failed for inner header" )
packet = lisp_format_packet ( packet [ 0 : 20 ] )
dprint ( "Packet header: {}" . format ( packet ) )
return ( None )
Ii1 = struct . unpack ( "B" , packet [ 8 : 9 ] ) [ 0 ]
if ( Ii1 == 0 ) :
dprint ( "IPv4 packet arrived with ttl 0, packet discarded" )
return ( None )
elif ( Ii1 == 1 ) :
dprint ( "IPv4 packet {}, packet discarded" . format ( bold ( "ttl expiry" , False ) ) )
if 55 - 55: oO0o . OoOoOO00 + OoooooooOO - ooOoO0o . OoooooooOO
return ( None )
if 77 - 77: I1IiiI
if 16 - 16: I1IiiI + ooOoO0o - O0 / o0oOOo0O0Ooo
Ii1 -= 1
packet = packet [ 0 : 8 ] + struct . pack ( "B" , Ii1 ) + packet [ 9 : : ]
packet = packet [ 0 : 10 ] + struct . pack ( "H" , 0 ) + packet [ 12 : : ]
packet = lisp_ip_checksum ( packet )
return ( packet )
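#
# Note (summary): lisp_ipv4_input() verifies the inner IPv4 header checksum
# (unless the packet arrived with a checksum of 0) and discards packets whose
# TTL is 0 or 1; otherwise it decrements the TTL, zeroes the checksum field,
# recomputes it, and returns the rewritten packet. A None return means the
# packet is dropped.
#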
def lisp_ipv6_input ( packet ) :
iIi11i1I11Ii = packet . inner_dest
packet = packet . packet
Ii1 = struct . unpack ( "B" , packet [ 7 : 8 ] ) [ 0 ]
if ( Ii1 == 0 ) :
dprint ( "IPv6 packet arrived with hop-limit 0, packet discarded" )
return ( None )
elif ( Ii1 == 1 ) :
dprint ( "IPv6 packet {}, packet discarded" . format ( bold ( "ttl expiry" , False ) ) )
if 99 - 99: II111iiii + O0
return ( None )
if ( iIi11i1I11Ii . is_ipv6_link_local ( ) ) :
dprint ( "Do not encapsulate IPv6 link-local packets" )
return ( None )
if 100 - 100: OoO0O00 / OoOoOO00 / OOooOOo / OoO0O00
if 95 - 95: ooOoO0o
Ii1 -= 1
packet = packet [ 0 : 7 ] + struct . pack ( "B" , Ii1 ) + packet [ 8 : : ]
return ( packet )
def lisp_mac_input ( packet ) :
return ( packet )
def lisp_rate_limit_map_request ( source , dest ) :
if ( lisp_last_map_request_sent == None ) : return ( False )
O0oO0oOOO0oO = lisp_get_timestamp ( )
iIIiI1iiI = O0oO0oOOO0oO - lisp_last_map_request_sent
IIIIi1II = ( iIIiI1iiI < LISP_MAP_REQUEST_RATE_LIMIT )
if 42 - 42: iII111i . o0oOOo0O0Ooo . OoO0O00 * Oo0Ooo
if ( IIIIi1II ) :
if ( source != None ) : source = source . print_address ( )
dest = dest . print_address ( )
dprint ( "Rate-limiting Map-Request for {} -> {}" . format ( source , dest ) )
if 39 - 39: i11iIiiIii - iII111i / O0 % Oo0Ooo
return ( IIIIi1II )
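#
# Note (summary): Map-Request rate limiting is one global timestamp check; a
# new request is suppressed while the time since lisp_last_map_request_sent
# is below LISP_MAP_REQUEST_RATE_LIMIT. The same pattern, with hypothetical
# names, is simply:
#
#   # elapsed = time.time() - last_sent
#   # if (elapsed < RATE_LIMIT_SECS): suppress, else send and update last_sent
#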
def lisp_send_map_request ( lisp_sockets , lisp_ephem_port , seid , deid , rloc ) :
global lisp_last_map_request_sent
oooooOOOoO00 = ooOo00oOOoo00O = None
if ( rloc ) :
oooooOOOoO00 = rloc . rloc
ooOo00oOOoo00O = rloc . translated_port if lisp_i_am_rtr else LISP_DATA_PORT
o0iII11i , I1iiI , oO00O = lisp_myrlocs
if ( o0iII11i == None ) :
lprint ( "Suppress sending Map-Request, IPv4 RLOC not found" )
return
if 72 - 72: O0 - I1IiiI . Oo0Ooo / o0oOOo0O0Ooo - i1IIi
if ( I1iiI == None and oooooOOOoO00 != None and oooooOOOoO00 . is_ipv6 ( ) ) :
lprint ( "Suppress sending Map-Request, IPv6 RLOC not found" )
return
if 98 - 98: Oo0Ooo * ooOoO0o * I11i + oO0o - O0
if 3 - 3: i1IIi + OoOoOO00 - OoOoOO00
IIi11i1I = lisp_map_request ( )
IIi11i1I . record_count = 1
IIi11i1I . nonce = lisp_get_control_nonce ( )
IIi11i1I . rloc_probe = ( oooooOOOoO00 != None )
if ( rloc ) : rloc . last_rloc_probe_nonce = IIi11i1I . nonce
if 59 - 59: OoOoOO00 % OoO0O00 % i11iIiiIii . II111iiii % I1ii11iIi11i + i1IIi
iIi1I = deid . is_multicast_address ( )
if ( iIi1I ) :
IIi11i1I . target_eid = seid
IIi11i1I . target_group = deid
else :
IIi11i1I . target_eid = deid
if ( IIi11i1I . rloc_probe == False ) :
iIiIIi1i = lisp_get_signature_eid ( )
if ( iIiIIi1i ) :
IIi11i1I . signature_eid . copy_address ( iIiIIi1i . eid )
IIi11i1I . privkey_filename = "./lisp-sig.pem"
if ( seid == None or iIi1I ) :
IIi11i1I . source_eid . afi = LISP_AFI_NONE
else :
IIi11i1I . source_eid = seid
if ( oooooOOOoO00 != None and lisp_nat_traversal and lisp_i_am_rtr == False ) :
if ( oooooOOOoO00 . is_private_address ( ) == False ) :
o0iII11i = lisp_get_any_translated_rloc ( )
if 23 - 23: o0oOOo0O0Ooo * II111iiii - Oo0Ooo - I1IiiI
if ( o0iII11i == None ) :
lprint ( "Suppress sending Map-Request, translated RLOC not found" )
return
if ( oooooOOOoO00 == None or oooooOOOoO00 . is_ipv4 ( ) ) :
if ( lisp_nat_traversal and oooooOOOoO00 == None ) :
oOOOOO0 = lisp_get_any_translated_rloc ( )
if ( oOOOOO0 != None ) : o0iII11i = oOOOOO0
if 6 - 6: OoO0O00 * O0
IIi11i1I . itr_rlocs . append ( o0iII11i )
if 6 - 6: I1Ii111 * IiII * ooOoO0o + o0oOOo0O0Ooo / I11i - ooOoO0o
if ( oooooOOOoO00 == None or oooooOOOoO00 . is_ipv6 ( ) ) :
if ( I1iiI == None or I1iiI . is_ipv6_link_local ( ) ) :
I1iiI = None
else :
IIi11i1I . itr_rloc_count = 1 if ( oooooOOOoO00 == None ) else 0
IIi11i1I . itr_rlocs . append ( I1iiI )
if ( oooooOOOoO00 != None and IIi11i1I . itr_rlocs != [ ] ) :
o0oi1iIiii1I1ii = IIi11i1I . itr_rlocs [ 0 ]
else :
if ( deid . is_ipv4 ( ) ) :
o0oi1iIiii1I1ii = o0iII11i
elif ( deid . is_ipv6 ( ) ) :
o0oi1iIiii1I1ii = I1iiI
else :
o0oi1iIiii1I1ii = o0iII11i
oOo = IIi11i1I . encode ( oooooOOOoO00 , ooOo00oOOoo00O )
IIi11i1I . print_map_request ( )
if ( oooooOOOoO00 != None ) :
if ( rloc . is_rloc_translated ( ) ) :
iiI = lisp_get_nat_info ( oooooOOOoO00 , rloc . rloc_name )
if ( iiI == None ) :
Oo0O = rloc . rloc . print_address_no_iid ( )
o0 = "gleaned-{}" . format ( Oo0O )
i111 = rloc . translated_port
iiI = lisp_nat_info ( Oo0O , o0 , i111 )
if 91 - 91: OoOoOO00 + II111iiii / I11i * iIii1I11I1II1
lisp_encapsulate_rloc_probe ( lisp_sockets , oooooOOOoO00 , iiI ,
oOo )
return
if 92 - 92: I1Ii111 - IiII / IiII
if 42 - 42: IiII
ooOOo0o = oooooOOOoO00 . print_address_no_iid ( )
iIi11i1I11Ii = lisp_convert_4to6 ( ooOOo0o )
lisp_send ( lisp_sockets , iIi11i1I11Ii , LISP_CTRL_PORT , oOo )
return
Oo00ooOOOo0O0 = None if lisp_i_am_rtr else seid
if ( lisp_decent_pull_xtr_configured ( ) ) :
Ii1IIi1III1i = lisp_get_decent_map_resolver ( deid )
else :
Ii1IIi1III1i = lisp_get_map_resolver ( None , Oo00ooOOOo0O0 )
if 17 - 17: O0 * i11iIiiIii - I1ii11iIi11i * iIii1I11I1II1 + oO0o * i1IIi
if ( Ii1IIi1III1i == None ) :
lprint ( "Cannot find Map-Resolver for source-EID {}" . format ( green ( seid . print_address ( ) , False ) ) )
if 15 - 15: ooOoO0o + I1ii11iIi11i / I1IiiI - Oo0Ooo - Ii1I / I11i
return
if 37 - 37: ooOoO0o / II111iiii . OOooOOo % iIii1I11I1II1 - Oo0Ooo - Ii1I
Ii1IIi1III1i . last_used = lisp_get_timestamp ( )
Ii1IIi1III1i . map_requests_sent += 1
if ( Ii1IIi1III1i . last_nonce == 0 ) : Ii1IIi1III1i . last_nonce = IIi11i1I . nonce
if 47 - 47: I1ii11iIi11i
if 26 - 26: iII111i
if 55 - 55: I1ii11iIi11i . ooOoO0o * Oo0Ooo + I1Ii111
if 59 - 59: iII111i - OOooOOo - OoO0O00 . I1IiiI % o0oOOo0O0Ooo + iII111i
if ( seid == None ) : seid = o0oi1iIiii1I1ii
lisp_send_ecm ( lisp_sockets , oOo , seid , lisp_ephem_port , deid ,
Ii1IIi1III1i . map_resolver )
if 10 - 10: iIii1I11I1II1 - Ii1I
if 84 - 84: iII111i
if 21 - 21: i11iIiiIii
if 30 - 30: OoO0O00 + OoooooooOO
lisp_last_map_request_sent = lisp_get_timestamp ( )
if 98 - 98: I1ii11iIi11i % I1IiiI
if 9 - 9: o0oOOo0O0Ooo / I1Ii111 % i1IIi - OOooOOo % I1IiiI / I1ii11iIi11i
if 66 - 66: IiII
if 56 - 56: oO0o + OoooooooOO
Ii1IIi1III1i . resolve_dns_name ( )
return
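#
# Note (summary): lisp_send_map_request() builds the Map-Request (nonce,
# ITR-RLOCs, optional signature EID), then takes one of three paths: an
# RLOC-probe toward a NAT-translated RLOC is data-encapsulated via
# lisp_encapsulate_rloc_probe(); a plain RLOC-probe is sent directly to the
# RLOC; an ordinary request is wrapped in an ECM and sent to the selected
# map-resolver (LISP-Decent index-based, otherwise least recently used).
#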
def lisp_send_info_request ( lisp_sockets , dest , port , device_name ) :
if 63 - 63: OOooOOo
if 84 - 84: i11iIiiIii * iIii1I11I1II1 % I11i % iII111i + OoooooooOO . o0oOOo0O0Ooo
if 78 - 78: o0oOOo0O0Ooo . iII111i + O0 / I1ii11iIi11i + I1ii11iIi11i + II111iiii
if 96 - 96: iIii1I11I1II1 * II111iiii . iIii1I11I1II1
i1iI1iII = lisp_info ( )
i1iI1iII . nonce = lisp_get_control_nonce ( )
if ( device_name ) : i1iI1iII . hostname += "-" + device_name
if 80 - 80: I1IiiI % I1ii11iIi11i
ooOOo0o = dest . print_address_no_iid ( )
Ii11II111 = False
if ( device_name ) :
OO0OOOoOooo0 = lisp_get_host_route_next_hop ( ooOOo0o )
if ( port == LISP_CTRL_PORT and OO0OOOoOooo0 != None ) :
while ( True ) :
time . sleep ( .01 )
OO0OOOoOooo0 = lisp_get_host_route_next_hop ( ooOOo0o )
if ( OO0OOOoOooo0 == None ) : break
if 85 - 85: I1Ii111 + iII111i - oO0o
if 59 - 59: IiII . oO0o / i11iIiiIii . I1Ii111
if 64 - 64: OoOoOO00
II1i1 = lisp_get_default_route_next_hops ( )
for oO00O , O0o0 in II1i1 :
if ( oO00O != device_name ) : continue
if ( OO0OOOoOooo0 != O0o0 ) :
if ( OO0OOOoOooo0 != None ) :
lisp_install_host_route ( ooOOo0o , OO0OOOoOooo0 , False )
if 49 - 49: OoooooooOO
lisp_install_host_route ( ooOOo0o , O0o0 , True )
Ii11II111 = True
if 75 - 75: OoO0O00
break
oOo = i1iI1iII . encode ( )
i1iI1iII . print_info ( )
if 43 - 43: IiII + II111iiii + oO0o / I1ii11iIi11i % i1IIi - OoO0O00
if 59 - 59: Oo0Ooo + O0 + iII111i
if 71 - 71: IiII - OoO0O00
if 90 - 90: Oo0Ooo
Oo0000Oo0Oo = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
Oo0000Oo0Oo = bold ( Oo0000Oo0Oo , False )
i111 = bold ( "{}" . format ( port ) , False )
ii1iI1iI1 = red ( ooOOo0o , False )
Ii111iI1iI1ii = "RTR " if port == LISP_DATA_PORT else "MS "
lprint ( "Send Info-Request to {}{}, port {} {}" . format ( Ii111iI1iI1ii , ii1iI1iI1 , i111 , Oo0000Oo0Oo ) )
if ( port == LISP_CTRL_PORT ) :
lisp_send ( lisp_sockets , dest , LISP_CTRL_PORT , oOo )
else :
oooooOOo0Oo = lisp_data_header ( )
oooooOOo0Oo . instance_id ( 0xffffff )
oooooOOo0Oo = oooooOOo0Oo . encode ( )
if ( oooooOOo0Oo ) :
oOo = oooooOOo0Oo + oOo
lisp_send ( lisp_sockets , dest , LISP_DATA_PORT , oOo )
if ( Ii11II111 ) :
lisp_install_host_route ( ooOOo0o , None , False )
if ( OO0OOOoOooo0 != None ) : lisp_install_host_route ( ooOOo0o , OO0OOOoOooo0 , True )
if 16 - 16: I11i % O0
return
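#
# Note (summary): to force an Info-Request out a particular interface, the
# code above temporarily installs a host route to the destination via that
# interface's default-route next-hop (removing a conflicting host route
# first), sends the Info-Request either as a plain control message or
# prepended with an instance-id 0xFFFFFF data header for the data port, and
# then restores the previous host-route state.
#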
def lisp_process_info_request ( lisp_sockets , packet , addr_str , sport , rtr_list ) :
if 69 - 69: OOooOOo . I1IiiI
if 11 - 11: I1Ii111 * I1IiiI - I1Ii111 / iII111i
if 22 - 22: iII111i % I11i % O0 - I11i
if 71 - 71: I1Ii111 / II111iiii - OoooooooOO % i1IIi + OoOoOO00 % OoooooooOO
i1iI1iII = lisp_info ( )
packet = i1iI1iII . decode ( packet )
if ( packet == None ) : return
i1iI1iII . print_info ( )
i1iI1iII . info_reply = True
i1iI1iII . global_etr_rloc . store_address ( addr_str )
i1iI1iII . etr_port = sport
if ( i1iI1iII . hostname != None ) :
i1iI1iII . private_etr_rloc . afi = LISP_AFI_NAME
i1iI1iII . private_etr_rloc . store_address ( i1iI1iII . hostname )
if ( rtr_list != None ) : i1iI1iII . rtr_list = rtr_list
packet = i1iI1iII . encode ( )
i1iI1iII . print_info ( )
lprint ( "Send Info-Reply to {}" . format ( red ( addr_str , False ) ) )
iIi11i1I11Ii = lisp_convert_4to6 ( addr_str )
lisp_send ( lisp_sockets , iIi11i1I11Ii , sport , packet )
O0OoO00OOOoOo = lisp_info_source ( i1iI1iII . hostname , addr_str , sport )
O0OoO00OOOoOo . cache_address_for_info_source ( )
return
def lisp_get_signature_eid ( ) :
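 # Return the first database-mapping entry configured with a signature-EID,
 # or None if there is none.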
for iIiIIi1i in lisp_db_list :
if ( iIiIIi1i . signature_eid ) : return ( iIiIIi1i )
if 98 - 98: iII111i - OoOoOO00 / I1Ii111 . OOooOOo - OOooOOo - ooOoO0o
return ( None )
def lisp_get_any_translated_port ( ) :
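 # Return the translated (NAT) port of the first database-mapping RLOC that
 # has a translated RLOC stored, or None.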
for iIiIIi1i in lisp_db_list :
for O0OO0O in iIiIIi1i . rloc_set :
if ( O0OO0O . translated_rloc . is_null ( ) ) : continue
return ( O0OO0O . translated_port )
return ( None )
def lisp_get_any_translated_rloc ( ) :
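 # Return the translated (NAT) RLOC address of the first database-mapping
 # RLOC that has one stored, or None.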
for iIiIIi1i in lisp_db_list :
for O0OO0O in iIiIIi1i . rloc_set :
if ( O0OO0O . translated_rloc . is_null ( ) ) : continue
return ( O0OO0O . translated_rloc )
return ( None )
def lisp_get_all_translated_rlocs ( ) :
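 # Return a list of address strings for every translated RLOC found in the
 # database-mappings.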
oOoII = [ ]
for iIiIIi1i in lisp_db_list :
for O0OO0O in iIiIIi1i . rloc_set :
if ( O0OO0O . is_rloc_translated ( ) == False ) : continue
iIiIi1iI11iiI = O0OO0O . translated_rloc . print_address_no_iid ( )
oOoII . append ( iIiIi1iI11iiI )
return ( oOoII )
def lisp_update_default_routes ( map_resolver , iid , rtr_list ) :
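 # Point the default map-cache routes (unicast and multicast, per supported
 # AFI) at the supplied RTR set. Existing default entries whose RLOC-set
 # already matches are left alone; stale ones are deleted and re-added with
 # priority-254 "RTR" RLOCs and an LISP_MR_TTL-based TTL. Private RTR
 # addresses are skipped when the LISP_RTR_BEHIND_NAT environment variable
 # is set.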
OOOO = ( os . getenv ( "LISP_RTR_BEHIND_NAT" ) != None )
if 95 - 95: Oo0Ooo . i11iIiiIii
i1IiI1i = { }
for Oo0o0o0oo in rtr_list :
if ( Oo0o0o0oo == None ) : continue
iIiIi1iI11iiI = rtr_list [ Oo0o0o0oo ]
if ( OOOO and iIiIi1iI11iiI . is_private_address ( ) ) : continue
i1IiI1i [ Oo0o0o0oo ] = iIiIi1iI11iiI
if 10 - 10: OoO0O00 - oO0o + Oo0Ooo / i11iIiiIii + Ii1I + I11i
rtr_list = i1IiI1i
if 59 - 59: ooOoO0o * II111iiii
ooOoOOOoO = [ ]
for o0o0O00oOo in [ LISP_AFI_IPV4 , LISP_AFI_IPV6 , LISP_AFI_MAC ] :
if ( o0o0O00oOo == LISP_AFI_MAC and lisp_l2_overlay == False ) : break
OOO0000o = lisp_address ( o0o0O00oOo , "" , 0 , iid )
OOO0000o . make_default_route ( OOO0000o )
ooooOoo000O = lisp_map_cache . lookup_cache ( OOO0000o , True )
if ( ooooOoo000O ) :
if ( ooooOoo000O . checkpoint_entry ) :
lprint ( "Updating checkpoint entry for {}" . format ( green ( ooooOoo000O . print_eid_tuple ( ) , False ) ) )
if 54 - 54: OoooooooOO / Ii1I
elif ( ooooOoo000O . do_rloc_sets_match ( rtr_list . values ( ) ) ) :
continue
if 26 - 26: o0oOOo0O0Ooo + OoO0O00
ooooOoo000O . delete_cache ( )
ooOoOOOoO . append ( [ OOO0000o , "" ] )
i1i11Ii1 = lisp_address ( o0o0O00oOo , "" , 0 , iid )
i1i11Ii1 . make_default_multicast_route ( i1i11Ii1 )
iI1iI1II1i1i = lisp_map_cache . lookup_cache ( i1i11Ii1 , True )
if ( iI1iI1II1i1i ) : iI1iI1II1i1i = iI1iI1II1i1i . source_cache . lookup_cache ( OOO0000o , True )
if ( iI1iI1II1i1i ) : iI1iI1II1i1i . delete_cache ( )
if 68 - 68: iII111i
ooOoOOOoO . append ( [ OOO0000o , i1i11Ii1 ] )
if 68 - 68: I1Ii111 - OoO0O00 % OoO0O00 % OOooOOo - OoO0O00
if ( len ( ooOoOOOoO ) == 0 ) : return
iiiI11II1IiIi = [ ]
for Ii111iI1iI1ii in rtr_list :
o00000 = rtr_list [ Ii111iI1iI1ii ]
O0OO0O = lisp_rloc ( )
O0OO0O . rloc . copy_address ( o00000 )
O0OO0O . priority = 254
O0OO0O . mpriority = 255
O0OO0O . rloc_name = "RTR"
iiiI11II1IiIi . append ( O0OO0O )
for OOO0000o in ooOoOOOoO :
ooooOoo000O = lisp_mapping ( OOO0000o [ 0 ] , OOO0000o [ 1 ] , iiiI11II1IiIi )
ooooOoo000O . mapping_source = map_resolver
ooooOoo000O . map_cache_ttl = LISP_MR_TTL * 60
ooooOoo000O . add_cache ( )
lprint ( "Add {} to map-cache with RTR RLOC-set: {}" . format ( green ( ooooOoo000O . print_eid_tuple ( ) , False ) , rtr_list . keys ( ) ) )
if 77 - 77: ooOoO0o % I1IiiI
iiiI11II1IiIi = copy . deepcopy ( iiiI11II1IiIi )
if 26 - 26: o0oOOo0O0Ooo
return
def lisp_process_info_reply ( source , packet , store ) :
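 # Process an Info-Reply: merge any newly learned RTRs into lisp_rtr_list,
 # refresh the default map-cache routes when this node is an ITR and the
 # RTR set changed, and (when store is True) record the translated global
 # RLOC/port against the matching interface RLOCs in the database-mappings.
 # Returns [global-rloc, translated-port, new-rtr-set-flag].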
i1iI1iII = lisp_info ( )
packet = i1iI1iII . decode ( packet )
if ( packet == None ) : return ( [ None , None , False ] )
if 88 - 88: oO0o % iII111i + I1ii11iIi11i - II111iiii . I11i
i1iI1iII . print_info ( )
iIi111I1iiii = False
for Ii111iI1iI1ii in i1iI1iII . rtr_list :
ooOOo0o = Ii111iI1iI1ii . print_address_no_iid ( )
if ( lisp_rtr_list . has_key ( ooOOo0o ) ) :
if ( lisp_register_all_rtrs == False ) : continue
if ( lisp_rtr_list [ ooOOo0o ] != None ) : continue
if 66 - 66: O0 % OoOoOO00 + IiII % I1Ii111
iIi111I1iiii = True
lisp_rtr_list [ ooOOo0o ] = Ii111iI1iI1ii
if ( lisp_i_am_itr and iIi111I1iiii ) :
if ( lisp_iid_to_interface == { } ) :
lisp_update_default_routes ( source , lisp_default_iid , lisp_rtr_list )
else :
for II1 in lisp_iid_to_interface . keys ( ) :
lisp_update_default_routes ( source , int ( II1 ) , lisp_rtr_list )
if ( store == False ) :
return ( [ i1iI1iII . global_etr_rloc , i1iI1iII . etr_port , iIi111I1iiii ] )
for iIiIIi1i in lisp_db_list :
for O0OO0O in iIiIIi1i . rloc_set :
Oo0o0o0oo = O0OO0O . rloc
II111IiiiI1 = O0OO0O . interface
if ( II111IiiiI1 == None ) :
if ( Oo0o0o0oo . is_null ( ) ) : continue
if ( Oo0o0o0oo . is_local ( ) == False ) : continue
if ( i1iI1iII . private_etr_rloc . is_null ( ) == False and
Oo0o0o0oo . is_exact_match ( i1iI1iII . private_etr_rloc ) == False ) :
continue
if 58 - 58: Oo0Ooo % I1IiiI
elif ( i1iI1iII . private_etr_rloc . is_dist_name ( ) ) :
i1OOO = i1iI1iII . private_etr_rloc . address
if ( i1OOO != O0OO0O . rloc_name ) : continue
oO00oo000O = green ( iIiIIi1i . eid . print_prefix ( ) , False )
ooOOo00o0ooO = red ( Oo0o0o0oo . print_address_no_iid ( ) , False )
if 12 - 12: I1ii11iIi11i % I1ii11iIi11i . OoO0O00 . OoOoOO00
OO0ooOoo000O = i1iI1iII . global_etr_rloc . is_exact_match ( Oo0o0o0oo )
if ( O0OO0O . translated_port == 0 and OO0ooOoo000O ) :
lprint ( "No NAT for {} ({}), EID-prefix {}" . format ( ooOOo00o0ooO ,
II111IiiiI1 , oO00oo000O ) )
continue
oO0oo0O0oOOo0 = i1iI1iII . global_etr_rloc
oo0o = O0OO0O . translated_rloc
if ( oo0o . is_exact_match ( oO0oo0O0oOOo0 ) and
i1iI1iII . etr_port == O0OO0O . translated_port ) : continue
if 75 - 75: OoO0O00 . IiII / I11i * i11iIiiIii - OoO0O00 / IiII
lprint ( "Store translation {}:{} for {} ({}), EID-prefix {}" . format ( red ( i1iI1iII . global_etr_rloc . print_address_no_iid ( ) , False ) ,
i1iI1iII . etr_port , ooOOo00o0ooO , II111IiiiI1 , oO00oo000O ) )
if 48 - 48: Ii1I + oO0o + Ii1I . I1ii11iIi11i
O0OO0O . store_translated_rloc ( i1iI1iII . global_etr_rloc ,
i1iI1iII . etr_port )
return ( [ i1iI1iII . global_etr_rloc , i1iI1iII . etr_port , iIi111I1iiii ] )
def lisp_test_mr ( lisp_sockets , port ) :
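 # Map-Resolver test routine: it would send Map-Requests for a few fixed
 # IPv4/IPv6 EIDs and re-arm itself on a timer, but the bare "return" on
 # the next line short-circuits it, so everything below is currently dead
 # code (apparently left disabled on purpose).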
return
lprint ( "Test Map-Resolvers" )
if 2 - 2: I11i
Oo00o = lisp_address ( LISP_AFI_IPV4 , "" , 0 , 0 )
oooO = lisp_address ( LISP_AFI_IPV6 , "" , 0 , 0 )
Oo00o . store_address ( "10.0.0.1" )
lisp_send_map_request ( lisp_sockets , port , None , Oo00o , None )
Oo00o . store_address ( "192.168.0.1" )
lisp_send_map_request ( lisp_sockets , port , None , Oo00o , None )
oooO . store_address ( "0100::1" )
lisp_send_map_request ( lisp_sockets , port , None , oooO , None )
oooO . store_address ( "8000::1" )
lisp_send_map_request ( lisp_sockets , port , None , oooO , None )
I11I11iiii = threading . Timer ( LISP_TEST_MR_INTERVAL , lisp_test_mr ,
[ lisp_sockets , port ] )
I11I11iiii . start ( )
return
def lisp_update_local_rloc ( rloc ) :
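 # Re-read the interface address bound to this database-mapping RLOC and,
 # if it changed (for example after DHCP renumbering), update the RLOC and
 # the cached lisp_myrlocs entry.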
if ( rloc . interface == None ) : return
if 93 - 93: oO0o / ooOoO0o - I1Ii111
iIiIi1iI11iiI = lisp_get_interface_address ( rloc . interface )
if ( iIiIi1iI11iiI == None ) : return
if 70 - 70: OOooOOo / Ii1I - ooOoO0o + OoooooooOO / OoO0O00 - i11iIiiIii
iiIii1i = rloc . rloc . print_address_no_iid ( )
iiiii1I = iIiIi1iI11iiI . print_address_no_iid ( )
if 4 - 4: I1Ii111
if ( iiIii1i == iiiii1I ) : return
if 15 - 15: I11i % I11i / iIii1I11I1II1 - i11iIiiIii / i1IIi
lprint ( "Local interface address changed on {} from {} to {}" . format ( rloc . interface , iiIii1i , iiiii1I ) )
rloc . rloc . copy_address ( iIiIi1iI11iiI )
lisp_myrlocs [ 0 ] = iIiIi1iI11iiI
return
def lisp_update_encap_port ( mc ) :
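 # For each RLOC in a map-cache entry, refresh the translated encap-port
 # from the stored NAT state if the port has changed.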
for Oo0o0o0oo in mc . rloc_set :
iiI = lisp_get_nat_info ( Oo0o0o0oo . rloc , Oo0o0o0oo . rloc_name )
if ( iiI == None ) : continue
if ( Oo0o0o0oo . translated_port == iiI . port ) : continue
if 27 - 27: OoO0O00 * OoooooooOO - II111iiii / o0oOOo0O0Ooo
lprint ( ( "Encap-port changed from {} to {} for RLOC {}, " + "EID-prefix {}" ) . format ( Oo0o0o0oo . translated_port , iiI . port ,
red ( Oo0o0o0oo . rloc . print_address_no_iid ( ) , False ) ,
green ( mc . print_eid_tuple ( ) , False ) ) )
if 35 - 35: iII111i + iIii1I11I1II1 + II111iiii % IiII * Ii1I
Oo0o0o0oo . store_translated_rloc ( Oo0o0o0oo . rloc , iiI . port )
if 63 - 63: I1Ii111
return
def lisp_timeout_map_cache_entry ( mc , delete_list ) :
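 # Timeout check for one map-cache entry. Entries with no TTL and entries
 # that have not yet expired are kept (refreshing the encap-port where
 # applicable); expired entries are appended to delete_list. Returns
 # [keep-walking, delete_list].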
if ( mc . map_cache_ttl == None ) :
lisp_update_encap_port ( mc )
return ( [ True , delete_list ] )
O0oO0oOOO0oO = lisp_get_timestamp ( )
if ( mc . last_refresh_time + mc . map_cache_ttl > O0oO0oOOO0oO ) :
if ( mc . action == LISP_NO_ACTION ) : lisp_update_encap_port ( mc )
return ( [ True , delete_list ] )
iIIiI1iiI = lisp_print_elapsed ( mc . last_refresh_time )
I11Ii11ii = mc . print_eid_tuple ( )
lprint ( "Map-cache entry for EID-prefix {} has {}, had uptime of {}" . format ( green ( I11Ii11ii , False ) , bold ( "timed out" , False ) , iIIiI1iiI ) )
delete_list . append ( mc )
return ( [ True , delete_list ] )
def lisp_timeout_map_cache_walk ( mc , parms ) :
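 # Walk callback for the map-cache timeout sweep. Unicast entries get the
 # timeout check and surviving entries are written to the checkpoint list;
 # entries with a group recurse into the source cache.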
iIIii1III = parms [ 0 ]
OO000 = parms [ 1 ]
if ( mc . group . is_null ( ) ) :
OooO000oo0o , iIIii1III = lisp_timeout_map_cache_entry ( mc , iIIii1III )
if ( iIIii1III == [ ] or mc != iIIii1III [ - 1 ] ) :
OO000 = lisp_write_checkpoint_entry ( OO000 , mc )
if 9 - 9: IiII - ooOoO0o * Ii1I / I1IiiI . i1IIi % O0
return ( [ OooO000oo0o , parms ] )
if ( mc . source_cache == None ) : return ( [ True , parms ] )
parms = mc . source_cache . walk_cache ( lisp_timeout_map_cache_entry , parms )
return ( [ True , parms ] )
def lisp_timeout_map_cache ( lisp_map_cache ) :
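 # Periodic map-cache sweep: walk the cache, delete entries that timed out,
 # and checkpoint the survivors.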
IiI11I111 = [ [ ] , [ ] ]
IiI11I111 = lisp_map_cache . walk_cache ( lisp_timeout_map_cache_walk , IiI11I111 )
iIIii1III = IiI11I111 [ 0 ]
for ooooOoo000O in iIIii1III : ooooOoo000O . delete_cache ( )
OO000 = IiI11I111 [ 1 ]
lisp_checkpoint ( OO000 )
return
def lisp_store_nat_info ( hostname , rloc , port ) :
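 # Record NAT state (RLOC address and translated port) learned for an xTR
 # hostname in lisp_nat_state_info. Returns True when the state is new or
 # changed, and False when it only refreshed the most recent entry.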
ooOOo0o = rloc . print_address_no_iid ( )
IIOoOO = "{} NAT state for {}, RLOC {}, port {}" . format ( "{}" ,
blue ( hostname , False ) , red ( ooOOo0o , False ) , port )
if 94 - 94: i11iIiiIii * i11iIiiIii * I1ii11iIi11i
oOOo00OO0oO0o = lisp_nat_info ( ooOOo0o , hostname , port )
if 57 - 57: i11iIiiIii / iII111i / o0oOOo0O0Ooo
if ( lisp_nat_state_info . has_key ( hostname ) == False ) :
lisp_nat_state_info [ hostname ] = [ oOOo00OO0oO0o ]
lprint ( IIOoOO . format ( "Store initial" ) )
return ( True )
iiI = lisp_nat_state_info [ hostname ] [ 0 ]
if ( iiI . address == ooOOo0o and iiI . port == port ) :
iiI . uptime = lisp_get_timestamp ( )
lprint ( IIOoOO . format ( "Refresh existing" ) )
return ( False )
i11I1111iIII = None
for iiI in lisp_nat_state_info [ hostname ] :
if ( iiI . address == ooOOo0o and iiI . port == port ) :
i11I1111iIII = iiI
break
if ( i11I1111iIII == None ) :
lprint ( IIOoOO . format ( "Store new" ) )
else :
lisp_nat_state_info [ hostname ] . remove ( i11I1111iIII )
lprint ( IIOoOO . format ( "Use previous" ) )
o00 = lisp_nat_state_info [ hostname ]
lisp_nat_state_info [ hostname ] = [ oOOo00OO0oO0o ] + o00
return ( True )
def lisp_get_nat_info ( rloc , hostname ) :
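 # Return the cached lisp_nat_info entry for this hostname/RLOC pair, or
 # None if no NAT state is stored.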
if ( lisp_nat_state_info . has_key ( hostname ) == False ) : return ( None )
if 96 - 96: iII111i * iIii1I11I1II1
ooOOo0o = rloc . print_address_no_iid ( )
for iiI in lisp_nat_state_info [ hostname ] :
if ( iiI . address == ooOOo0o ) : return ( iiI )
if 100 - 100: iIii1I11I1II1 . I1ii11iIi11i . i11iIiiIii % i11iIiiIii % I11i % Ii1I
return ( None )
def lisp_build_info_requests ( lisp_sockets , dest , port ) :
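 # NAT-traversal: build and send Info-Requests for every database-mapping
 # RLOC bound to an interface. Destinations are the explicit dest if given,
 # otherwise the configured Map-Resolvers, falling back to the Map-Servers.
 # Map-Resolver DNS names are re-resolved afterwards.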
if ( lisp_nat_traversal == False ) : return
Ooo0000oO = [ ]
ooOooO00OO = [ ]
if ( dest == None ) :
for Ii1IIi1III1i in lisp_map_resolvers_list . values ( ) :
ooOooO00OO . append ( Ii1IIi1III1i . map_resolver )
if 79 - 79: ooOoO0o - O0
Ooo0000oO = ooOooO00OO
if ( Ooo0000oO == [ ] ) :
for ooooOOoO in lisp_map_servers_list . values ( ) :
Ooo0000oO . append ( ooooOOoO . map_server )
if ( Ooo0000oO == [ ] ) : return
else :
Ooo0000oO . append ( dest )
oOoII = { }
for iIiIIi1i in lisp_db_list :
for O0OO0O in iIiIIi1i . rloc_set :
lisp_update_local_rloc ( O0OO0O )
if ( O0OO0O . rloc . is_null ( ) ) : continue
if ( O0OO0O . interface == None ) : continue
if 34 - 34: II111iiii + iII111i / IiII
iIiIi1iI11iiI = O0OO0O . rloc . print_address_no_iid ( )
if ( iIiIi1iI11iiI in oOoII ) : continue
oOoII [ iIiIi1iI11iiI ] = O0OO0O . interface
if ( oOoII == { } ) :
lprint ( 'Suppress Info-Request, no "interface = <device>" RLOC ' + "found in any database-mappings" )
if 63 - 63: o0oOOo0O0Ooo * iIii1I11I1II1 * II111iiii . OoO0O00 - oO0o / OoOoOO00
return
for iIiIi1iI11iiI in oOoII :
II111IiiiI1 = oOoII [ iIiIi1iI11iiI ]
ii1iI1iI1 = red ( iIiIi1iI11iiI , False )
lprint ( "Build Info-Request for private address {} ({})" . format ( ii1iI1iI1 ,
II111IiiiI1 ) )
oO00O = II111IiiiI1 if len ( oOoII ) > 1 else None
for dest in Ooo0000oO :
lisp_send_info_request ( lisp_sockets , dest , port , oO00O )
if ( ooOooO00OO != [ ] ) :
for Ii1IIi1III1i in lisp_map_resolvers_list . values ( ) :
Ii1IIi1III1i . resolve_dns_name ( )
return
def lisp_valid_address_format ( kw , value ) :
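 # Loose syntax check for "address" values in configuration: quoted
 # distinguished-names, dotted IPv4, geo-coordinates (N/S/W/E with at least
 # 8 dash-separated fields), dash-separated MAC (3 hex groups), colon
 # IPv6, and "+digits" (E.164-style) forms are accepted.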
if ( kw != "address" ) : return ( True )
if ( value [ 0 ] == "'" and value [ - 1 ] == "'" ) : return ( True )
if ( value . find ( "." ) != - 1 ) :
iIiIi1iI11iiI = value . split ( "." )
if ( len ( iIiIi1iI11iiI ) != 4 ) : return ( False )
if 67 - 67: i11iIiiIii + i11iIiiIii / ooOoO0o - o0oOOo0O0Ooo
for OooO0o0 in iIiIi1iI11iiI :
if ( OooO0o0 . isdigit ( ) == False ) : return ( False )
if ( int ( OooO0o0 ) > 255 ) : return ( False )
if 29 - 29: o0oOOo0O0Ooo
return ( True )
if ( value . find ( "-" ) != - 1 ) :
iIiIi1iI11iiI = value . split ( "-" )
for II11iIII1i1I in [ "N" , "S" , "W" , "E" ] :
if ( II11iIII1i1I in iIiIi1iI11iiI ) :
if ( len ( iIiIi1iI11iiI ) < 8 ) : return ( False )
return ( True )
if ( value . find ( "-" ) != - 1 ) :
iIiIi1iI11iiI = value . split ( "-" )
if ( len ( iIiIi1iI11iiI ) != 3 ) : return ( False )
if 100 - 100: oO0o . OoO0O00 . i11iIiiIii % II111iiii * IiII
for oOO0OooOo in iIiIi1iI11iiI :
try : int ( oOO0OooOo , 16 )
except : return ( False )
if 64 - 64: I11i * OoO0O00 . I1IiiI
return ( True )
if ( value . find ( ":" ) != - 1 ) :
iIiIi1iI11iiI = value . split ( ":" )
if ( len ( iIiIi1iI11iiI ) < 2 ) : return ( False )
if 93 - 93: i1IIi / Ii1I * II111iiii - Oo0Ooo . OoOoOO00 - OOooOOo
I111 = False
i1Ii11II = 0
for oOO0OooOo in iIiIi1iI11iiI :
i1Ii11II += 1
if ( oOO0OooOo == "" ) :
if ( I111 ) :
if ( len ( iIiIi1iI11iiI ) == i1Ii11II ) : break
if ( i1Ii11II > 2 ) : return ( False )
if 67 - 67: I1ii11iIi11i - OoO0O00 % O0 / I1ii11iIi11i - OOooOOo . O0
I111 = True
continue
if 4 - 4: O0
try : int ( oOO0OooOo , 16 )
except : return ( False )
if 35 - 35: Ii1I . II111iiii % OoOoOO00
return ( True )
if ( value [ 0 ] == "+" ) :
iIiIi1iI11iiI = value [ 1 : : ]
for oO0OOO0oo0Ooo in iIiIi1iI11iiI :
if ( oO0OOO0oo0Ooo . isdigit ( ) == False ) : return ( False )
if 73 - 73: I1Ii111 - II111iiii / Ii1I + Ii1I
return ( True )
if 41 - 41: II111iiii / II111iiii / iII111i * I1IiiI * I1Ii111 * oO0o
return ( False )
def lisp_process_api ( process , lisp_socket , data_structure ) :
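 # Handle a lisp-core API request of the form "<name>%<json-parms>":
 # dispatch to the map-cache, site-cache, map-server, map-resolver, or
 # database-mapping handlers and return the result as JSON over IPC.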
ii1i1i1i1 , IiI11I111 = data_structure . split ( "%" )
if 4 - 4: o0oOOo0O0Ooo - O0 * OoooooooOO % O0 * Ii1I
lprint ( "Process API request '{}', parameters: '{}'" . format ( ii1i1i1i1 ,
IiI11I111 ) )
if 3 - 3: IiII + OoooooooOO - i1IIi
i11 = [ ]
if ( ii1i1i1i1 == "map-cache" ) :
if ( IiI11I111 == "" ) :
i11 = lisp_map_cache . walk_cache ( lisp_process_api_map_cache , i11 )
else :
i11 = lisp_process_api_map_cache_entry ( json . loads ( IiI11I111 ) )
if ( ii1i1i1i1 == "site-cache" ) :
if ( IiI11I111 == "" ) :
i11 = lisp_sites_by_eid . walk_cache ( lisp_process_api_site_cache ,
i11 )
else :
i11 = lisp_process_api_site_cache_entry ( json . loads ( IiI11I111 ) )
if ( ii1i1i1i1 == "map-server" ) :
IiI11I111 = { } if ( IiI11I111 == "" ) else json . loads ( IiI11I111 )
i11 = lisp_process_api_ms_or_mr ( True , IiI11I111 )
if 5 - 5: I1IiiI - ooOoO0o + O0
if ( ii1i1i1i1 == "map-resolver" ) :
IiI11I111 = { } if ( IiI11I111 == "" ) else json . loads ( IiI11I111 )
i11 = lisp_process_api_ms_or_mr ( False , IiI11I111 )
if 47 - 47: i1IIi - II111iiii - II111iiii
if ( ii1i1i1i1 == "database-mapping" ) :
i11 = lisp_process_api_database_mapping ( )
i11 = json . dumps ( i11 )
oOooOOoo = lisp_api_ipc ( process , i11 )
lisp_ipc ( oOooOOoo , lisp_socket , "lisp-core" )
return
def lisp_process_api_map_cache ( mc , data ) :
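 # Walk callback that gathers API data for a map-cache entry, recursing
 # into the source cache for (S,G) entries.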
if ( mc . group . is_null ( ) ) : return ( lisp_gather_map_cache_data ( mc , data ) )
if 21 - 21: oO0o % o0oOOo0O0Ooo + I1Ii111 . OOooOOo / OOooOOo
if ( mc . source_cache == None ) : return ( [ True , data ] )
data = mc . source_cache . walk_cache ( lisp_gather_map_cache_data , data )
return ( [ True , data ] )
def lisp_gather_map_cache_data ( mc , data ) :
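 # Build the API dictionary for one map-cache entry: EID/group, uptime,
 # action, TTL, and per-RLOC state (address, encap-port, geo/elp/rle/json,
 # stats, priorities/weights, RLOC-probe RTT and hop counts).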
iiIIIIiI111 = { }
iiIIIIiI111 [ "instance-id" ] = str ( mc . eid . instance_id )
iiIIIIiI111 [ "eid-prefix" ] = mc . eid . print_prefix_no_iid ( )
if ( mc . group . is_null ( ) == False ) :
iiIIIIiI111 [ "group-prefix" ] = mc . group . print_prefix_no_iid ( )
if 100 - 100: O0 . iII111i / iIii1I11I1II1
iiIIIIiI111 [ "uptime" ] = lisp_print_elapsed ( mc . uptime )
iiIIIIiI111 [ "expires" ] = lisp_print_elapsed ( mc . uptime )
iiIIIIiI111 [ "action" ] = lisp_map_reply_action_string [ mc . action ]
iiIIIIiI111 [ "ttl" ] = "--" if mc . map_cache_ttl == None else str ( mc . map_cache_ttl / 60 )
iiiI11II1IiIi = [ ]
for Oo0o0o0oo in mc . rloc_set :
Oo0O = { }
if ( Oo0o0o0oo . rloc_exists ( ) ) :
Oo0O [ "address" ] = Oo0o0o0oo . rloc . print_address_no_iid ( )
if ( Oo0o0o0oo . translated_port != 0 ) :
Oo0O [ "encap-port" ] = str ( Oo0o0o0oo . translated_port )
if 81 - 81: OoooooooOO % OoOoOO00 / IiII / OoooooooOO + i1IIi - O0
Oo0O [ "state" ] = Oo0o0o0oo . print_state ( )
if ( Oo0o0o0oo . geo ) : Oo0O [ "geo" ] = Oo0o0o0oo . geo . print_geo ( )
if ( Oo0o0o0oo . elp ) : Oo0O [ "elp" ] = Oo0o0o0oo . elp . print_elp ( False )
if ( Oo0o0o0oo . rle ) : Oo0O [ "rle" ] = Oo0o0o0oo . rle . print_rle ( False )
if ( Oo0o0o0oo . json ) : Oo0O [ "json" ] = Oo0o0o0oo . json . print_json ( False )
if ( Oo0o0o0oo . rloc_name ) : Oo0O [ "rloc-name" ] = Oo0o0o0oo . rloc_name
iiIIi11 = Oo0o0o0oo . stats . get_stats ( False , False )
if ( iiIIi11 ) : Oo0O [ "stats" ] = iiIIi11
Oo0O [ "uptime" ] = lisp_print_elapsed ( Oo0o0o0oo . uptime )
Oo0O [ "upriority" ] = str ( Oo0o0o0oo . priority )
Oo0O [ "uweight" ] = str ( Oo0o0o0oo . weight )
Oo0O [ "mpriority" ] = str ( Oo0o0o0oo . mpriority )
Oo0O [ "mweight" ] = str ( Oo0o0o0oo . mweight )
o000Oo00o = Oo0o0o0oo . last_rloc_probe_reply
if ( o000Oo00o ) :
Oo0O [ "last-rloc-probe-reply" ] = lisp_print_elapsed ( o000Oo00o )
Oo0O [ "rloc-probe-rtt" ] = str ( Oo0o0o0oo . rloc_probe_rtt )
if 78 - 78: OoO0O00 - ooOoO0o + Oo0Ooo % i1IIi % iIii1I11I1II1
Oo0O [ "rloc-hop-count" ] = Oo0o0o0oo . rloc_probe_hops
Oo0O [ "recent-rloc-hop-counts" ] = Oo0o0o0oo . recent_rloc_probe_hops
if 69 - 69: I11i % ooOoO0o
OoOii = [ ]
for oooOoo in Oo0o0o0oo . recent_rloc_probe_rtts : OoOii . append ( str ( oooOoo ) )
Oo0O [ "recent-rloc-probe-rtts" ] = OoOii
if 68 - 68: I1IiiI - i1IIi
iiiI11II1IiIi . append ( Oo0O )
if 98 - 98: OOooOOo . Oo0Ooo
iiIIIIiI111 [ "rloc-set" ] = iiiI11II1IiIi
if 83 - 83: OoooooooOO
data . append ( iiIIIIiI111 )
return ( [ True , data ] )
def lisp_process_api_map_cache_entry ( parms ) :
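 # API lookup of a single map-cache entry by instance-id, EID-prefix and
 # optional group-prefix.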
II1 = parms [ "instance-id" ]
II1 = 0 if ( II1 == "" ) else int ( II1 )
Oo00o = lisp_address ( LISP_AFI_NONE , "" , 0 , II1 )
Oo00o . store_prefix ( parms [ "eid-prefix" ] )
iIi11i1I11Ii = Oo00o
oo = Oo00o
i1i11Ii1 = lisp_address ( LISP_AFI_NONE , "" , 0 , II1 )
if ( parms . has_key ( "group-prefix" ) ) :
i1i11Ii1 . store_prefix ( parms [ "group-prefix" ] )
iIi11i1I11Ii = i1i11Ii1
i11 = [ ]
ooooOoo000O = lisp_map_cache_lookup ( oo , iIi11i1I11Ii )
if ( ooooOoo000O ) : OooO000oo0o , i11 = lisp_process_api_map_cache ( ooooOoo000O , i11 )
return ( i11 )
def lisp_process_api_site_cache ( se , data ) :
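 # Walk callback that gathers API data for a site-cache entry, recursing
 # into the source cache for (S,G) entries.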
if ( se . group . is_null ( ) ) : return ( lisp_gather_site_cache_data ( se , data ) )
if 39 - 39: I1IiiI
if ( se . source_cache == None ) : return ( [ True , data ] )
data = se . source_cache . walk_cache ( lisp_gather_site_cache_data , data )
return ( [ True , data ] )
def lisp_process_api_ms_or_mr ( ms_or_mr , data ) :
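 # Return API data for a configured Map-Server (ms_or_mr True) or
 # Map-Resolver (ms_or_mr False), optionally filtered by dns-name or
 # address.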
Iiii1Ii1I = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
Ooooo000 = data [ "dns-name" ] if data . has_key ( "dns-name" ) else None
if ( data . has_key ( "address" ) ) :
Iiii1Ii1I . store_address ( data [ "address" ] )
ooOo0O0O0oOO0 = { }
if ( ms_or_mr ) :
for ooooOOoO in lisp_map_servers_list . values ( ) :
if ( Ooooo000 ) :
if ( Ooooo000 != ooooOOoO . dns_name ) : continue
else :
if ( Iiii1Ii1I . is_exact_match ( ooooOOoO . map_server ) == False ) : continue
ooOo0O0O0oOO0 [ "dns-name" ] = ooooOOoO . dns_name
ooOo0O0O0oOO0 [ "address" ] = ooooOOoO . map_server . print_address_no_iid ( )
ooOo0O0O0oOO0 [ "ms-name" ] = "" if ooooOOoO . ms_name == None else ooooOOoO . ms_name
return ( [ ooOo0O0O0oOO0 ] )
if 29 - 29: i1IIi / Ii1I / oO0o * iII111i
else :
for Ii1IIi1III1i in lisp_map_resolvers_list . values ( ) :
if ( Ooooo000 ) :
if ( Ooooo000 != Ii1IIi1III1i . dns_name ) : continue
else :
if ( Iiii1Ii1I . is_exact_match ( Ii1IIi1III1i . map_resolver ) == False ) : continue
ooOo0O0O0oOO0 [ "dns-name" ] = Ii1IIi1III1i . dns_name
ooOo0O0O0oOO0 [ "address" ] = Ii1IIi1III1i . map_resolver . print_address_no_iid ( )
ooOo0O0O0oOO0 [ "mr-name" ] = "" if Ii1IIi1III1i . mr_name == None else Ii1IIi1III1i . mr_name
return ( [ ooOo0O0O0oOO0 ] )
return ( [ ] )
def lisp_process_api_database_mapping ( ) :
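 # Return API data for every configured database-mapping: EID/group prefix
 # and each RLOC's address, name, interface and translated RLOC.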
i11 = [ ]
if 75 - 75: Ii1I / i1IIi % I1ii11iIi11i . Ii1I
for iIiIIi1i in lisp_db_list :
iiIIIIiI111 = { }
iiIIIIiI111 [ "eid-prefix" ] = iIiIIi1i . eid . print_prefix ( )
if ( iIiIIi1i . group . is_null ( ) == False ) :
iiIIIIiI111 [ "group-prefix" ] = iIiIIi1i . group . print_prefix ( )
ooo0o0 = [ ]
for Oo0O in iIiIIi1i . rloc_set :
Oo0o0o0oo = { }
if ( Oo0O . rloc . is_null ( ) == False ) :
Oo0o0o0oo [ "rloc" ] = Oo0O . rloc . print_address_no_iid ( )
if 85 - 85: OoO0O00 * I1Ii111 - OoooooooOO / iIii1I11I1II1 - i1IIi + Ii1I
if ( Oo0O . rloc_name != None ) : Oo0o0o0oo [ "rloc-name" ] = Oo0O . rloc_name
if ( Oo0O . interface != None ) : Oo0o0o0oo [ "interface" ] = Oo0O . interface
o0oOOOoO0OoO = Oo0O . translated_rloc
if ( o0oOOOoO0OoO . is_null ( ) == False ) :
Oo0o0o0oo [ "translated-rloc" ] = o0oOOOoO0OoO . print_address_no_iid ( )
if 51 - 51: i11iIiiIii
if ( Oo0o0o0oo != { } ) : ooo0o0 . append ( Oo0o0o0oo )
iiIIIIiI111 [ "rlocs" ] = ooo0o0
i11 . append ( iiIIIIiI111 )
if 52 - 52: I1Ii111 % OoO0O00 . I1Ii111 * I1ii11iIi11i * OoOoOO00 + i1IIi
return ( i11 )
def lisp_gather_site_cache_data ( se , data ) :
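 # Build the API dictionary for one site-cache entry: registration state
 # and timestamps, last registerer, accept-more-specifics and dynamic
 # flags, site-id, xtr-id, and the registered RLOC-set.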
iiIIIIiI111 = { }
iiIIIIiI111 [ "site-name" ] = se . site . site_name
iiIIIIiI111 [ "instance-id" ] = str ( se . eid . instance_id )
iiIIIIiI111 [ "eid-prefix" ] = se . eid . print_prefix_no_iid ( )
if ( se . group . is_null ( ) == False ) :
iiIIIIiI111 [ "group-prefix" ] = se . group . print_prefix_no_iid ( )
if 96 - 96: II111iiii % I11i / Ii1I - i11iIiiIii
iiIIIIiI111 [ "registered" ] = "yes" if se . registered else "no"
iiIIIIiI111 [ "first-registered" ] = lisp_print_elapsed ( se . first_registered )
iiIIIIiI111 [ "last-registered" ] = lisp_print_elapsed ( se . last_registered )
if 63 - 63: I1IiiI
iIiIi1iI11iiI = se . last_registerer
iIiIi1iI11iiI = "none" if iIiIi1iI11iiI . is_null ( ) else iIiIi1iI11iiI . print_address ( )
iiIIIIiI111 [ "last-registerer" ] = iIiIi1iI11iiI
iiIIIIiI111 [ "ams" ] = "yes" if ( se . accept_more_specifics ) else "no"
iiIIIIiI111 [ "dynamic" ] = "yes" if ( se . dynamic ) else "no"
iiIIIIiI111 [ "site-id" ] = str ( se . site_id )
if ( se . xtr_id_present ) :
iiIIIIiI111 [ "xtr-id" ] = "0x" + lisp_hex_string ( se . xtr_id )
iiiI11II1IiIi = [ ]
for Oo0o0o0oo in se . registered_rlocs :
Oo0O = { }
Oo0O [ "address" ] = Oo0o0o0oo . rloc . print_address_no_iid ( ) if Oo0o0o0oo . rloc_exists ( ) else "none"
if ( Oo0o0o0oo . geo ) : Oo0O [ "geo" ] = Oo0o0o0oo . geo . print_geo ( )
if ( Oo0o0o0oo . elp ) : Oo0O [ "elp" ] = Oo0o0o0oo . elp . print_elp ( False )
if ( Oo0o0o0oo . rle ) : Oo0O [ "rle" ] = Oo0o0o0oo . rle . print_rle ( False )
if ( Oo0o0o0oo . json ) : Oo0O [ "json" ] = Oo0o0o0oo . json . print_json ( False )
if ( Oo0o0o0oo . rloc_name ) : Oo0O [ "rloc-name" ] = Oo0o0o0oo . rloc_name
Oo0O [ "uptime" ] = lisp_print_elapsed ( Oo0o0o0oo . uptime )
Oo0O [ "upriority" ] = str ( Oo0o0o0oo . priority )
Oo0O [ "uweight" ] = str ( Oo0o0o0oo . weight )
Oo0O [ "mpriority" ] = str ( Oo0o0o0oo . mpriority )
Oo0O [ "mweight" ] = str ( Oo0o0o0oo . mweight )
if 58 - 58: ooOoO0o
iiiI11II1IiIi . append ( Oo0O )
if 15 - 15: O0 * OOooOOo * I11i + Ii1I * OoooooooOO + OOooOOo
iiIIIIiI111 [ "registered-rlocs" ] = iiiI11II1IiIi
if 77 - 77: O0
data . append ( iiIIIIiI111 )
return ( [ True , data ] )
def lisp_process_api_site_cache_entry ( parms ) :
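 # API lookup of a single site-cache entry by instance-id, EID-prefix and
 # optional group-prefix.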
II1 = parms [ "instance-id" ]
II1 = 0 if ( II1 == "" ) else int ( II1 )
Oo00o = lisp_address ( LISP_AFI_NONE , "" , 0 , II1 )
Oo00o . store_prefix ( parms [ "eid-prefix" ] )
i1i11Ii1 = lisp_address ( LISP_AFI_NONE , "" , 0 , II1 )
if ( parms . has_key ( "group-prefix" ) ) :
i1i11Ii1 . store_prefix ( parms [ "group-prefix" ] )
i11 = [ ]
iIi1II1 = lisp_site_eid_lookup ( Oo00o , i1i11Ii1 , False )
if ( iIi1II1 ) : lisp_gather_site_cache_data ( iIi1II1 , i11 )
return ( i11 )
def lisp_get_interface_instance_id ( device , source_eid ) :
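 # Return the instance-id to use for a packet arriving on 'device'. For
 # multi-tenant interfaces the source EID is matched against the configured
 # multi-tenant EID-prefixes and the longest match wins; otherwise the
 # interface's configured instance-id (or lisp_default_iid) is returned.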
II111IiiiI1 = None
if ( lisp_myinterfaces . has_key ( device ) ) :
II111IiiiI1 = lisp_myinterfaces [ device ]
if ( II111IiiiI1 == None or II111IiiiI1 . instance_id == None ) :
return ( lisp_default_iid )
II1 = II111IiiiI1 . get_instance_id ( )
if ( source_eid == None ) : return ( II1 )
if 48 - 48: i1IIi . IiII - O0 + OoooooooOO
I1IIiI1I1iiiI = source_eid . instance_id
IIIii = None
for II111IiiiI1 in lisp_multi_tenant_interfaces :
if ( II111IiiiI1 . device != device ) : continue
OOO0000o = II111IiiiI1 . multi_tenant_eid
source_eid . instance_id = OOO0000o . instance_id
if ( source_eid . is_more_specific ( OOO0000o ) == False ) : continue
if ( IIIii == None or IIIii . multi_tenant_eid . mask_len < OOO0000o . mask_len ) :
IIIii = II111IiiiI1
source_eid . instance_id = I1IIiI1I1iiiI
if 65 - 65: OoooooooOO
if ( IIIii == None ) : return ( II1 )
return ( IIIii . get_instance_id ( ) )
def lisp_allow_dynamic_eid ( device , eid ) :
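 # If 'device' is configured to accept dynamic-EIDs and 'eid' matches its
 # dynamic-EID prefix, return the dynamic-EID device name, else None.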
if ( lisp_myinterfaces . has_key ( device ) == False ) : return ( None )
if 46 - 46: IiII + iII111i + II111iiii . iII111i - i11iIiiIii % OoO0O00
II111IiiiI1 = lisp_myinterfaces [ device ]
IIi1iI = device if II111IiiiI1 . dynamic_eid_device == None else II111IiiiI1 . dynamic_eid_device
if ( II111IiiiI1 . does_dynamic_eid_match ( eid ) ) : return ( IIi1iI )
return ( None )
def lisp_start_rloc_probe_timer ( interval , lisp_sockets ) :
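 # (Re)arm the periodic RLOC-probe timer so lisp_process_rloc_probe_timer
 # fires after 'interval' seconds.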
global lisp_rloc_probe_timer
if 46 - 46: i11iIiiIii
if ( lisp_rloc_probe_timer != None ) : lisp_rloc_probe_timer . cancel ( )
if 70 - 70: i1IIi + o0oOOo0O0Ooo
i11Ii1ii = lisp_process_rloc_probe_timer
I1i1i1Ii1II1 = threading . Timer ( interval , i11Ii1ii , [ lisp_sockets ] )
lisp_rloc_probe_timer = I1i1i1Ii1II1
I1i1i1Ii1II1 . start ( )
return
def lisp_show_rloc_probe_list ( ) :
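 # Debug dump of lisp_rloc_probe_list: for each RLOC key, print the
 # (rloc, eid, group, port) tuples that reference it.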
lprint ( bold ( "----- RLOC-probe-list -----" , False ) )
for Iiii11 in lisp_rloc_probe_list :
II11I1IiII = lisp_rloc_probe_list [ Iiii11 ]
lprint ( "RLOC {}:" . format ( Iiii11 ) )
for Oo0O , Oo0ooo0Ooo , o0 in II11I1IiII :
lprint ( " [{}, {}, {}, {}]" . format ( hex ( id ( Oo0O ) ) , Oo0ooo0Ooo . print_prefix ( ) ,
o0 . print_prefix ( ) , Oo0O . translated_port ) )
lprint ( bold ( "---------------------------" , False ) )
return
def lisp_mark_rlocs_for_other_eids ( eid_list ) :
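 # Mark the RLOC shared by the (rloc, eid, group) tuples in eid_list as
 # unreachable for every EID after the first (the first entry appears to be
 # handled by the caller), log the transitions, and push the affected
 # map-cache entries over IPC.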
Oo0o0o0oo , Oo0ooo0Ooo , o0 = eid_list [ 0 ]
iII1II = [ lisp_print_eid_tuple ( Oo0ooo0Ooo , o0 ) ]
if 43 - 43: I1ii11iIi11i % I1ii11iIi11i % i1IIi
for Oo0o0o0oo , Oo0ooo0Ooo , o0 in eid_list [ 1 : : ] :
Oo0o0o0oo . state = LISP_RLOC_UNREACH_STATE
Oo0o0o0oo . last_state_change = lisp_get_timestamp ( )
iII1II . append ( lisp_print_eid_tuple ( Oo0ooo0Ooo , o0 ) )
iI1i = bold ( "unreachable" , False )
ooOOo00o0ooO = red ( Oo0o0o0oo . rloc . print_address_no_iid ( ) , False )
if 45 - 45: OoO0O00 % iII111i / iIii1I11I1II1 % I1IiiI + OOooOOo
for Oo00o in iII1II :
Oo0ooo0Ooo = green ( Oo00o , False )
lprint ( "RLOC {} went {} for EID {}" . format ( ooOOo00o0ooO , iI1i , Oo0ooo0Ooo ) )
for Oo0o0o0oo , Oo0ooo0Ooo , o0 in eid_list :
ooooOoo000O = lisp_map_cache . lookup_cache ( Oo0ooo0Ooo , True )
if ( ooooOoo000O ) : lisp_write_ipc_map_cache ( True , ooooOoo000O )
if 90 - 90: I1IiiI * II111iiii . Oo0Ooo % I1IiiI
return
def lisp_process_rloc_probe_timer ( lisp_sockets ) :
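 # Periodic RLOC-probe run: re-arm the timer, then walk
 # lisp_rloc_probe_list and probe each RLOC, skipping RLOCs for gleaned
 # EIDs when the gleaning policy suppresses probing, duplicate RLOC/port
 # pairs, RLOCs already in down state, and RLOCs whose configured next-hop
 # is not currently a default-route next-hop.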
lisp_set_exception ( )
if 58 - 58: I11i
lisp_start_rloc_probe_timer ( LISP_RLOC_PROBE_INTERVAL , lisp_sockets )
if ( lisp_rloc_probing == False ) : return
if ( lisp_print_rloc_probe_list ) : lisp_show_rloc_probe_list ( )
I1i1 = lisp_get_default_route_next_hops ( )
if 51 - 51: I1IiiI . ooOoO0o / Ii1I / I1Ii111
lprint ( "---------- Start RLOC Probing for {} entries ----------" . format ( len ( lisp_rloc_probe_list ) ) )
i1Ii11II = 0
oo00OO0Oooo = bold ( "RLOC-probe" , False )
for OOOo0O in lisp_rloc_probe_list . values ( ) :
I1oo0O0oOOoOo = None
for oOoI1IiII11II11 , Oo00o , i1i11Ii1 in OOOo0O :
ooOOo0o = oOoI1IiII11II11 . rloc . print_address_no_iid ( )
ooO0oO , o0o0 = lisp_allow_gleaning ( Oo00o , oOoI1IiII11II11 )
if ( ooO0oO and o0o0 == False ) :
Oo0ooo0Ooo = green ( Oo00o . print_address ( ) , False )
ooOOo0o += ":{}" . format ( oOoI1IiII11II11 . translated_port )
lprint ( "Suppress probe to RLOC {} for gleaned EID {}" . format ( red ( ooOOo0o , False ) , Oo0ooo0Ooo ) )
if 30 - 30: O0 / I1Ii111
continue
if ( oOoI1IiII11II11 . down_state ( ) ) : continue
if ( I1oo0O0oOOoOo ) :
oOoI1IiII11II11 . last_rloc_probe_nonce = I1oo0O0oOOoOo . last_rloc_probe_nonce
if 2 - 2: iIii1I11I1II1
if ( I1oo0O0oOOoOo . translated_port == oOoI1IiII11II11 . translated_port and I1oo0O0oOOoOo . rloc_name == oOoI1IiII11II11 . rloc_name ) :
if 85 - 85: O0 - ooOoO0o
Oo0ooo0Ooo = green ( lisp_print_eid_tuple ( Oo00o , i1i11Ii1 ) , False )
lprint ( "Suppress probe to duplicate RLOC {} for {}" . format ( red ( ooOOo0o , False ) , Oo0ooo0Ooo ) )
if 35 - 35: o0oOOo0O0Ooo - I1IiiI
continue
O0o0 = None
Oo0o0o0oo = None
while ( True ) :
Oo0o0o0oo = oOoI1IiII11II11 if Oo0o0o0oo == None else Oo0o0o0oo . next_rloc
if ( Oo0o0o0oo == None ) : break
if ( Oo0o0o0oo . rloc_next_hop != None ) :
if ( Oo0o0o0oo . rloc_next_hop not in I1i1 ) :
if ( Oo0o0o0oo . up_state ( ) ) :
i1 , oOO0OoOoOoo = Oo0o0o0oo . rloc_next_hop
Oo0o0o0oo . state = LISP_RLOC_UNREACH_STATE
Oo0o0o0oo . last_state_change = lisp_get_timestamp ( )
lisp_update_rtr_updown ( Oo0o0o0oo . rloc , False )
if 8 - 8: I11i * i11iIiiIii - ooOoO0o
iI1i = bold ( "unreachable" , False )
lprint ( "Next-hop {}({}) for RLOC {} is {}" . format ( oOO0OoOoOoo , i1 ,
red ( ooOOo0o , False ) , iI1i ) )
continue
i1OooO00oO00o = Oo0o0o0oo . last_rloc_probe
Oooo00OO = 0 if i1OooO00oO00o == None else time . time ( ) - i1OooO00oO00o
if ( Oo0o0o0oo . unreach_state ( ) and Oooo00OO < LISP_RLOC_PROBE_INTERVAL ) :
lprint ( "Waiting for probe-reply from RLOC {}" . format ( red ( ooOOo0o , False ) ) )
if 25 - 25: IiII - IiII
continue
IiIii1i11i1 = lisp_get_echo_nonce ( None , ooOOo0o )
if ( IiIii1i11i1 and IiIii1i11i1 . request_nonce_timeout ( ) ) :
Oo0o0o0oo . state = LISP_RLOC_NO_ECHOED_NONCE_STATE
Oo0o0o0oo . last_state_change = lisp_get_timestamp ( )
iI1i = bold ( "unreachable" , False )
lprint ( "RLOC {} went {}, nonce-echo failed" . format ( red ( ooOOo0o , False ) , iI1i ) )
if 86 - 86: i11iIiiIii
lisp_update_rtr_updown ( Oo0o0o0oo . rloc , False )
continue
if ( IiIii1i11i1 and IiIii1i11i1 . recently_echoed ( ) ) :
lprint ( ( "Suppress RLOC-probe to {}, nonce-echo " + "received" ) . format ( red ( ooOOo0o , False ) ) )
if 88 - 88: oO0o + oO0o % OoO0O00 . OoooooooOO - OoooooooOO . Oo0Ooo
continue
if ( Oo0o0o0oo . last_rloc_probe != None ) :
i1OooO00oO00o = Oo0o0o0oo . last_rloc_probe_reply
if ( i1OooO00oO00o == None ) : i1OooO00oO00o = 0
Oooo00OO = time . time ( ) - i1OooO00oO00o
if ( Oo0o0o0oo . up_state ( ) and Oooo00OO >= LISP_RLOC_PROBE_REPLY_WAIT ) :
if 2 - 2: I11i
Oo0o0o0oo . state = LISP_RLOC_UNREACH_STATE
Oo0o0o0oo . last_state_change = lisp_get_timestamp ( )
lisp_update_rtr_updown ( Oo0o0o0oo . rloc , False )
iI1i = bold ( "unreachable" , False )
lprint ( "RLOC {} went {}, probe it" . format ( red ( ooOOo0o , False ) , iI1i ) )
if 12 - 12: i1IIi . I1Ii111
if 99 - 99: Oo0Ooo / i11iIiiIii
lisp_mark_rlocs_for_other_eids ( OOOo0O )
Oo0o0o0oo . last_rloc_probe = lisp_get_timestamp ( )
if 8 - 8: i11iIiiIii
oO0oo0o = "" if Oo0o0o0oo . unreach_state ( ) == False else " unreachable"
IIiii1IiiIiii = ""
oOO0OoOoOoo = None
if ( Oo0o0o0oo . rloc_next_hop != None ) :
i1 , oOO0OoOoOoo = Oo0o0o0oo . rloc_next_hop
lisp_install_host_route ( ooOOo0o , oOO0OoOoOoo , True )
IIiii1IiiIiii = ", send on nh {}({})" . format ( oOO0OoOoOoo , i1 )
oooOoo = Oo0o0o0oo . print_rloc_probe_rtt ( )
I1iIIii111i11i11 = ooOOo0o
if ( Oo0o0o0oo . translated_port != 0 ) :
I1iIIii111i11i11 += ":{}" . format ( Oo0o0o0oo . translated_port )
if 10 - 10: I1IiiI % I1Ii111 . IiII - OOooOOo
I1iIIii111i11i11 = red ( I1iIIii111i11i11 , False )
if ( Oo0o0o0oo . rloc_name != None ) :
I1iIIii111i11i11 += " (" + blue ( Oo0o0o0oo . rloc_name , False ) + ")"
if 93 - 93: iIii1I11I1II1
lprint ( "Send {}{} {}, last rtt: {}{}" . format ( oo00OO0Oooo , oO0oo0o ,
I1iIIii111i11i11 , oooOoo , IIiii1IiiIiii ) )
if ( Oo0o0o0oo . rloc_next_hop != None ) :
O0o0 = lisp_get_host_route_next_hop ( ooOOo0o )
if ( O0o0 ) : lisp_install_host_route ( ooOOo0o , O0o0 , False )
if ( Oo0o0o0oo . rloc . is_null ( ) ) :
Oo0o0o0oo . rloc . copy_address ( oOoI1IiII11II11 . rloc )
oOoO = None if ( i1i11Ii1 . is_null ( ) ) else Oo00o
iII1I1iiII11I = Oo00o if ( i1i11Ii1 . is_null ( ) ) else i1i11Ii1
lisp_send_map_request ( lisp_sockets , 0 , oOoO , iII1I1iiII11I , Oo0o0o0oo )
I1oo0O0oOOoOo = oOoI1IiII11II11
if ( oOO0OoOoOoo ) : lisp_install_host_route ( ooOOo0o , oOO0OoOoOoo , False )
if ( O0o0 ) : lisp_install_host_route ( ooOOo0o , O0o0 , True )
i1Ii11II += 1
if ( ( i1Ii11II % 10 ) == 0 ) : time . sleep ( 0.020 )
lprint ( "---------- End RLOC Probing ----------" )
return
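# ITR only: when specific (not all) RTRs are being registered, tell the lisp-etr
# process over IPC that this RTR has gone up or down ('rtr%<address>%<up|down>').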
def lisp_update_rtr_updown ( rtr , updown ) :
global lisp_ipc_socket
if ( lisp_i_am_itr == False ) : return
if ( lisp_register_all_rtrs ) : return
if 62 - 62: OOooOOo . iIii1I11I1II1 + I1IiiI / OOooOOo
oo0OOoOOOO = rtr . print_address_no_iid ( )
if ( lisp_rtr_list . has_key ( oo0OOoOOOO ) == False ) : return
if 86 - 86: o0oOOo0O0Ooo . Oo0Ooo - Ii1I / i11iIiiIii
updown = "up" if updown else "down"
lprint ( "Send ETR IPC message, RTR {} has done {}" . format (
red ( oo0OOoOOOO , False ) , bold ( updown , False ) ) )
oOooOOoo = "rtr%{}%{}" . format ( oo0OOoOOOO , updown )
oOooOOoo = lisp_command_ipc ( oOooOOoo , "lisp-itr" )
lisp_ipc ( oOooOOoo , lisp_ipc_socket , "lisp-etr" )
return
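# Process a received RLOC-probe Map-Reply: find the probe-list entry by RLOC
# address (with or without port) or by packet source; log and drop unsolicited
# replies, otherwise hand the nonce, hop-count and TTL to each matching RLOC entry.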
def lisp_process_rloc_probe_reply ( rloc , source , port , nonce , hop_count , ttl ) :
oo00OO0Oooo = bold ( "RLOC-probe reply" , False )
IIII = rloc . print_address_no_iid ( )
Ii1II1 = source . print_address_no_iid ( )
OOIii = lisp_rloc_probe_list
iIiIi1iI11iiI = IIII
if ( OOIii . has_key ( iIiIi1iI11iiI ) == False ) :
iIiIi1iI11iiI += ":" + str ( port )
if ( OOIii . has_key ( iIiIi1iI11iiI ) == False ) :
iIiIi1iI11iiI = Ii1II1
if ( OOIii . has_key ( iIiIi1iI11iiI ) == False ) :
iIiIi1iI11iiI += ":" + str ( port )
lprint ( " Received unsolicited {} from {}/{}, port {}" . format ( oo00OO0Oooo , red ( IIII , False ) , red ( Ii1II1 , False ) , port ) )
return
for rloc , Oo00o , i1i11Ii1 in lisp_rloc_probe_list [ iIiIi1iI11iiI ] :
if ( lisp_i_am_rtr and rloc . translated_port != 0 and
rloc . translated_port != port ) : continue
if 90 - 90: i1IIi * Ii1I . OoO0O00 % I11i * ooOoO0o . OOooOOo
rloc . process_rloc_probe_reply ( nonce , Oo00o , i1i11Ii1 , hop_count , ttl )
if 76 - 76: iIii1I11I1II1 . i11iIiiIii * II111iiii - iII111i
return
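# Count database-mapping entries, expanding dynamic-EIDs and per-EID instance-ID lists.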
def lisp_db_list_length ( ) :
i1Ii11II = 0
for iIiIIi1i in lisp_db_list :
i1Ii11II += len ( iIiIIi1i . dynamic_eids ) if iIiIIi1i . dynamic_eid_configured ( ) else 1
i1Ii11II += len ( iIiIIi1i . eid . iid_list )
if 74 - 74: I1ii11iIi11i . I1ii11iIi11i % IiII + OOooOOo . OoO0O00 * I11i
return ( i1Ii11II )
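# Return True if 'eid' falls within any locally configured database-mapping EID-prefix.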
def lisp_is_myeid ( eid ) :
for iIiIIi1i in lisp_db_list :
if ( eid . is_more_specific ( iIiIIi1i . eid ) ) : return ( True )
if 80 - 80: OoO0O00 . O0 / Ii1I % I1Ii111 / iII111i * I1IiiI
return ( False )
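# Format two 12-hex-digit MAC strings as 'xxxx-xxxx-xxxx -> xxxx-xxxx-xxxx' for logging.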
def lisp_format_macs ( sa , da ) :
sa = sa [ 0 : 4 ] + "-" + sa [ 4 : 8 ] + "-" + sa [ 8 : 12 ]
da = da [ 0 : 4 ] + "-" + da [ 4 : 8 ] + "-" + da [ 8 : 12 ]
return ( "{} -> {}" . format ( sa , da ) )
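# Return the nonce-echo state kept for an RLOC (by address object or string), or
# None when nonce echoing is disabled or no state exists.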
def lisp_get_echo_nonce ( rloc , rloc_str ) :
if ( lisp_nonce_echoing == False ) : return ( None )
if 83 - 83: o0oOOo0O0Ooo . OoO0O00 % iIii1I11I1II1 % OoOoOO00 - i11iIiiIii
if ( rloc ) : rloc_str = rloc . print_address_no_iid ( )
IiIii1i11i1 = None
if ( lisp_nonce_echo_list . has_key ( rloc_str ) ) :
IiIii1i11i1 = lisp_nonce_echo_list [ rloc_str ]
if 71 - 71: I1ii11iIi11i - II111iiii / O0 % i1IIi + oO0o
return ( IiIii1i11i1 )
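# Strip a null-terminated distinguished-name from the front of 'packet'; returns
# (remaining packet, name), or [None, None] if the name exceeds 255 bytes.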
def lisp_decode_dist_name ( packet ) :
i1Ii11II = 0
iIIi111iI = ""
if 1 - 1: II111iiii % oO0o . IiII
while ( packet [ 0 : 1 ] != "\0" ) :
if ( i1Ii11II == 255 ) : return ( [ None , None ] )
iIIi111iI += packet [ 0 : 1 ]
packet = packet [ 1 : : ]
i1Ii11II += 1
if 85 - 85: oO0o % iII111i + IiII + I1Ii111
if 5 - 5: O0 . I11i % i11iIiiIii - i1IIi . OOooOOo
packet = packet [ 1 : : ]
return ( packet , iIIi111iI )
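# Append the buffered flow entries to ./logs/lisp-flow.log and release the list.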
def lisp_write_flow_log ( flow_log ) :
Iiooo000o0OoOo = open ( "./logs/lisp-flow.log" , "a" )
if 6 - 6: o0oOOo0O0Ooo / ooOoO0o + OOooOOo / I1ii11iIi11i % I1Ii111
i1Ii11II = 0
for Oo0OO0 in flow_log :
oOo = Oo0OO0 [ 3 ]
oO0OoOOoOO = oOo . print_flow ( Oo0OO0 [ 0 ] , Oo0OO0 [ 1 ] , Oo0OO0 [ 2 ] )
Iiooo000o0OoOo . write ( oO0OoOOoOO )
i1Ii11II += 1
if 58 - 58: I11i . I11i + O0 / I1IiiI
Iiooo000o0OoOo . close ( )
del ( flow_log )
if 45 - 45: OoooooooOO * II111iiii
i1Ii11II = bold ( str ( i1Ii11II ) , False )
lprint ( "Wrote {} flow entries to ./logs/lisp-flow.log" . format ( i1Ii11II ) )
return
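# Handle the 'lisp policy' command: build one lisp_policy_match() per configured
# match clause, fill in the set-* actions on a lisp_policy(), and save the policy.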
def lisp_policy_command ( kv_pair ) :
i111 = lisp_policy ( "" )
iiI1IIi1III = None
if 27 - 27: OOooOOo
oooo0oO0O = [ ]
for II11iIII1i1I in range ( len ( kv_pair [ "datetime-range" ] ) ) :
oooo0oO0O . append ( lisp_policy_match ( ) )
if 65 - 65: iII111i - O0 * iIii1I11I1II1 + oO0o + i1IIi
if 87 - 87: IiII % IiII
for oOi1I1 in kv_pair . keys ( ) :
ooOo0O0O0oOO0 = kv_pair [ oOi1I1 ]
if ( oOi1I1 == "instance-id" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
if ( i1II . source_eid == None ) :
i1II . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 89 - 89: Oo0Ooo + i1IIi
if ( i1II . dest_eid == None ) :
i1II . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 60 - 60: I1ii11iIi11i - I1IiiI * I1Ii111 * I1Ii111 / OoooooooOO
i1II . source_eid . instance_id = int ( IIiIi1IIiI1i )
i1II . dest_eid . instance_id = int ( IIiIi1IIiI1i )
if 17 - 17: i1IIi - ooOoO0o
if 86 - 86: I1ii11iIi11i . o0oOOo0O0Ooo
if ( oOi1I1 == "source-eid" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
if ( i1II . source_eid == None ) :
i1II . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 30 - 30: o0oOOo0O0Ooo / i11iIiiIii
II1 = i1II . source_eid . instance_id
i1II . source_eid . store_prefix ( IIiIi1IIiI1i )
i1II . source_eid . instance_id = II1
if 33 - 33: OOooOOo % OoooooooOO
if 98 - 98: Ii1I
if ( oOi1I1 == "destination-eid" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
if ( i1II . dest_eid == None ) :
i1II . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 38 - 38: ooOoO0o - iII111i * OOooOOo % I1ii11iIi11i + Oo0Ooo
II1 = i1II . dest_eid . instance_id
i1II . dest_eid . store_prefix ( IIiIi1IIiI1i )
i1II . dest_eid . instance_id = II1
if 95 - 95: iIii1I11I1II1 / O0 % O0
if 53 - 53: ooOoO0o . ooOoO0o
if ( oOi1I1 == "source-rloc" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
i1II . source_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
i1II . source_rloc . store_prefix ( IIiIi1IIiI1i )
if 80 - 80: i11iIiiIii % I1Ii111 % I1IiiI / I1IiiI + oO0o + iII111i
if 18 - 18: OoO0O00 * ooOoO0o
if ( oOi1I1 == "destination-rloc" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
i1II . dest_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
i1II . dest_rloc . store_prefix ( IIiIi1IIiI1i )
if 32 - 32: oO0o . OoooooooOO - o0oOOo0O0Ooo + II111iiii
if 4 - 4: OOooOOo * I1IiiI - I11i - I11i
if ( oOi1I1 == "rloc-record-name" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
i1II . rloc_record_name = IIiIi1IIiI1i
if 67 - 67: I1IiiI
if 32 - 32: oO0o * i11iIiiIii - I11i % Oo0Ooo * I1ii11iIi11i
if ( oOi1I1 == "geo-name" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
i1II . geo_name = IIiIi1IIiI1i
if 79 - 79: II111iiii / Oo0Ooo / I1ii11iIi11i
if 30 - 30: I11i . o0oOOo0O0Ooo / II111iiii
if ( oOi1I1 == "elp-name" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
i1II . elp_name = IIiIi1IIiI1i
if 59 - 59: i11iIiiIii
if 5 - 5: i11iIiiIii + o0oOOo0O0Ooo . OoO0O00 % OoOoOO00 + I11i
if ( oOi1I1 == "rle-name" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
i1II . rle_name = IIiIi1IIiI1i
if 59 - 59: I1ii11iIi11i
if 47 - 47: I1IiiI + Oo0Ooo
if ( oOi1I1 == "json-name" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
i1II = oooo0oO0O [ II11iIII1i1I ]
i1II . json_name = IIiIi1IIiI1i
if 78 - 78: i1IIi / I1ii11iIi11i % ooOoO0o * OoO0O00
if 10 - 10: i1IIi % ooOoO0o / iII111i
if ( oOi1I1 == "datetime-range" ) :
for II11iIII1i1I in range ( len ( oooo0oO0O ) ) :
IIiIi1IIiI1i = ooOo0O0O0oOO0 [ II11iIII1i1I ]
i1II = oooo0oO0O [ II11iIII1i1I ]
if ( IIiIi1IIiI1i == "" ) : continue
II1Ooo0000o00OO = lisp_datetime ( IIiIi1IIiI1i [ 0 : 19 ] )
iIiooooOooOO0 = lisp_datetime ( IIiIi1IIiI1i [ 19 : : ] )
if ( II1Ooo0000o00OO . valid_datetime ( ) and iIiooooOooOO0 . valid_datetime ( ) ) :
i1II . datetime_lower = II1Ooo0000o00OO
i1II . datetime_upper = iIiooooOooOO0
if ( oOi1I1 == "set-action" ) :
i111 . set_action = ooOo0O0O0oOO0
if 28 - 28: OOooOOo * iIii1I11I1II1 * OoOoOO00
if ( oOi1I1 == "set-record-ttl" ) :
i111 . set_record_ttl = int ( ooOo0O0O0oOO0 )
if 75 - 75: Oo0Ooo % IiII + II111iiii + oO0o
if ( oOi1I1 == "set-instance-id" ) :
if ( i111 . set_source_eid == None ) :
i111 . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 35 - 35: I1ii11iIi11i - oO0o - O0 / iII111i % IiII
if ( i111 . set_dest_eid == None ) :
i111 . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 10 - 10: OOooOOo + oO0o - I1Ii111 . I1IiiI
iiI1IIi1III = int ( ooOo0O0O0oOO0 )
i111 . set_source_eid . instance_id = iiI1IIi1III
i111 . set_dest_eid . instance_id = iiI1IIi1III
if 11 - 11: I1ii11iIi11i . I1Ii111 / o0oOOo0O0Ooo + IiII
if ( oOi1I1 == "set-source-eid" ) :
if ( i111 . set_source_eid == None ) :
i111 . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 73 - 73: OoO0O00 . i11iIiiIii * OoO0O00 * i1IIi + I11i
i111 . set_source_eid . store_prefix ( ooOo0O0O0oOO0 )
if ( iiI1IIi1III != None ) : i111 . set_source_eid . instance_id = iiI1IIi1III
if 27 - 27: i11iIiiIii / OoOoOO00 % O0 / II111iiii . I11i - ooOoO0o
if ( oOi1I1 == "set-destination-eid" ) :
if ( i111 . set_dest_eid == None ) :
i111 . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 54 - 54: oO0o * II111iiii
i111 . set_dest_eid . store_prefix ( ooOo0O0O0oOO0 )
if ( iiI1IIi1III != None ) : i111 . set_dest_eid . instance_id = iiI1IIi1III
if 79 - 79: o0oOOo0O0Ooo . ooOoO0o . Oo0Ooo * OoooooooOO
if ( oOi1I1 == "set-rloc-address" ) :
i111 . set_rloc_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
i111 . set_rloc_address . store_address ( ooOo0O0O0oOO0 )
if 98 - 98: ooOoO0o
if ( oOi1I1 == "set-rloc-record-name" ) :
i111 . set_rloc_record_name = ooOo0O0O0oOO0
if 73 - 73: I1Ii111
if ( oOi1I1 == "set-elp-name" ) :
i111 . set_elp_name = ooOo0O0O0oOO0
if 97 - 97: OoO0O00 * Ii1I + Oo0Ooo
if ( oOi1I1 == "set-geo-name" ) :
i111 . set_geo_name = ooOo0O0O0oOO0
if 83 - 83: II111iiii - Oo0Ooo % II111iiii * o0oOOo0O0Ooo
if ( oOi1I1 == "set-rle-name" ) :
i111 . set_rle_name = ooOo0O0O0oOO0
if 51 - 51: iII111i * iIii1I11I1II1 % Ii1I * Ii1I + i11iIiiIii . OoooooooOO
if ( oOi1I1 == "set-json-name" ) :
i111 . set_json_name = ooOo0O0O0oOO0
if 54 - 54: i11iIiiIii . iIii1I11I1II1 * iIii1I11I1II1 + Ii1I % I11i - OoO0O00
if ( oOi1I1 == "policy-name" ) :
i111 . policy_name = ooOo0O0O0oOO0
i111 . match_clauses = oooo0oO0O
i111 . save_policy ( )
return
if 21 - 21: I1Ii111 / iII111i + Oo0Ooo / I1ii11iIi11i / I1Ii111
if 33 - 33: OoooooooOO
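# Command table for 'lisp policy': the handler plus the type/range constraints for
# each keyword.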
lisp_policy_commands = {
"lisp policy" : [ lisp_policy_command , {
"policy-name" : [ True ] ,
"match" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"source-eid" : [ True ] ,
"destination-eid" : [ True ] ,
"source-rloc" : [ True ] ,
"destination-rloc" : [ True ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"datetime-range" : [ True ] ,
"set-action" : [ False , "process" , "drop" ] ,
"set-record-ttl" : [ True , 0 , 0x7fffffff ] ,
"set-instance-id" : [ True , 0 , 0xffffffff ] ,
"set-source-eid" : [ True ] ,
"set-destination-eid" : [ True ] ,
"set-rloc-address" : [ True ] ,
"set-rloc-record-name" : [ True ] ,
"set-elp-name" : [ True ] ,
"set-geo-name" : [ True ] ,
"set-rle-name" : [ True ] ,
"set-json-name" : [ True ] } ]
}
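# Send a configuration command (optionally scoped to an interface) to Arista EOS
# hardware via FastCli.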
def lisp_send_to_arista ( command , interface ) :
interface = "" if ( interface == None ) else "interface " + interface
if 51 - 51: OoooooooOO / IiII / I1ii11iIi11i . Oo0Ooo - o0oOOo0O0Ooo * OoooooooOO
IIi1 = command
if ( interface != "" ) : IIi1 = interface + ": " + IIi1
lprint ( "Send CLI command '{}' to hardware" . format ( IIi1 ) )
if 20 - 20: OoOoOO00
commands = '''
enable
configure
{}
{}
''' . format ( interface , command )
if 33 - 33: OoO0O00
os . system ( "FastCli -c '{}'" . format ( commands ) )
return
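# Ask Arista EOS whether 'prefix' is programmed in the Trident L3 software route
# table; returns True when the last column of the output row is 'Y'.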
def lisp_arista_is_alive ( prefix ) :
iiI1i = "enable\nsh plat trident l3 software routes {}\n" . format ( prefix )
I1i = commands . getoutput ( "FastCli -c '{}'" . format ( iiI1i ) )
I1i = I1i . split ( "\n" ) [ 1 ]
I11iIII1i1i1 = I1i . split ( " " )
I11iIII1i1i1 = I11iIII1i1i1 [ - 1 ] . replace ( "\r" , "" )
return ( I11iIII1i1i1 == "Y" )
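# Arista hardware assist: if /persist/local/lispers.net exists and the map-cache
# entry has a best RLOC, allocate a free address on vlan4094, install a static ARP
# and a VXLAN MAC table entry pointing at the RLOC, then add a kernel route for the
# EID-prefix via that address.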
def lisp_program_vxlan_hardware ( mc ) :
if ( os . path . exists ( "/persist/local/lispers.net" ) == False ) : return
if ( len ( mc . best_rloc_set ) == 0 ) : return
iII = mc . eid . print_prefix_no_iid ( )
Oo0o0o0oo = mc . best_rloc_set [ 0 ] . rloc . print_address_no_iid ( )
II1i1iI = commands . getoutput ( "ip route get {} | egrep vlan4094" . format ( iII ) )
if 36 - 36: i1IIi * IiII * I1ii11iIi11i
if ( II1i1iI != "" ) :
lprint ( "Route {} already in hardware: '{}'" . format ( green ( iII , False ) , II1i1iI ) )
if 28 - 28: I1ii11iIi11i - i11iIiiIii % i11iIiiIii
return
Ii1Ii1111iii = commands . getoutput ( "ifconfig | egrep 'vxlan|vlan4094'" )
if ( Ii1Ii1111iii . find ( "vxlan" ) == - 1 ) :
lprint ( "No VXLAN interface found, cannot program hardware" )
return
if 4 - 4: i11iIiiIii
if ( Ii1Ii1111iii . find ( "vlan4094" ) == - 1 ) :
lprint ( "No vlan4094 interface found, cannot program hardware" )
return
if 53 - 53: I1Ii111
ooO00O00OO0oo = commands . getoutput ( "ip addr | egrep vlan4094 | egrep inet" )
if ( ooO00O00OO0oo == "" ) :
lprint ( "No IP address found on vlan4094, cannot program hardware" )
return
if 41 - 41: I11i + I1IiiI + oO0o . Ii1I
ooO00O00OO0oo = ooO00O00OO0oo . split ( "inet " ) [ 1 ]
ooO00O00OO0oo = ooO00O00OO0oo . split ( "/" ) [ 0 ]
Oooo0 = [ ]
iiOOoOOOo = commands . getoutput ( "arp -i vlan4094" ) . split ( "\n" )
for IIIIIiI11Ii in iiOOoOOOo :
if ( IIIIIiI11Ii . find ( "vlan4094" ) == - 1 ) : continue
if ( IIIIIiI11Ii . find ( "(incomplete)" ) == - 1 ) : continue
O0o0 = IIIIIiI11Ii . split ( " " ) [ 0 ]
Oooo0 . append ( O0o0 )
if 12 - 12: oO0o - i11iIiiIii / O0 + oO0o . I11i % iII111i
if 30 - 30: I1IiiI % iIii1I11I1II1
O0o0 = None
IiI1iIi1I1i = ooO00O00OO0oo
ooO00O00OO0oo = ooO00O00OO0oo . split ( "." )
for II11iIII1i1I in range ( 1 , 255 ) :
ooO00O00OO0oo [ 3 ] = str ( II11iIII1i1I )
iIiIi1iI11iiI = "." . join ( ooO00O00OO0oo )
if ( iIiIi1iI11iiI in Oooo0 ) : continue
if ( iIiIi1iI11iiI == IiI1iIi1I1i ) : continue
O0o0 = iIiIi1iI11iiI
break
if 37 - 37: OoooooooOO - Oo0Ooo % oO0o
if ( O0o0 == None ) :
lprint ( "Address allocation failed for vlan4094, cannot program " + "hardware" )
if 59 - 59: II111iiii - o0oOOo0O0Ooo / I1ii11iIi11i . oO0o / o0oOOo0O0Ooo - iII111i
return
OO000oOooO00 = Oo0o0o0oo . split ( "." )
IIi1iii = lisp_hex_string ( OO000oOooO00 [ 1 ] ) . zfill ( 2 )
oo0O0oOOo0O = lisp_hex_string ( OO000oOooO00 [ 2 ] ) . zfill ( 2 )
iiIIi1iII1 = lisp_hex_string ( OO000oOooO00 [ 3 ] ) . zfill ( 2 )
i1IiII1i1I = "00:00:00:{}:{}:{}" . format ( IIi1iii , oo0O0oOOo0O , iiIIi1iII1 )
IIiiIi = "0000.00{}.{}{}" . format ( IIi1iii , oo0O0oOOo0O , iiIIi1iII1 )
iiIIi1I = "arp -i vlan4094 -s {} {}" . format ( O0o0 , i1IiII1i1I )
os . system ( iiIIi1I )
O0o0oOo = ( "mac address-table static {} vlan 4094 " + "interface vxlan 1 vtep {}" ) . format ( IIiiIi , Oo0o0o0oo )
if 52 - 52: iIii1I11I1II1 * OOooOOo % i1IIi
lisp_send_to_arista ( O0o0oOo , None )
IIIi11Ii11I = "ip route add {} via {}" . format ( iII , O0o0 )
os . system ( IIIi11Ii11I )
if 61 - 61: I1Ii111 - I1IiiI - I11i * OoO0O00 - O0 + iII111i
lprint ( "Hardware programmed with commands:" )
IIIi11Ii11I = IIIi11Ii11I . replace ( iII , green ( iII , False ) )
lprint ( " " + IIIi11Ii11I )
lprint ( " " + iiIIi1I )
O0o0oOo = O0o0oOo . replace ( Oo0o0o0oo , red ( Oo0o0o0oo , False ) )
lprint ( " " + O0o0oOo )
return
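# Map-cache walker used on clear: remove the kernel route installed for the EID-prefix.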
def lisp_clear_hardware_walk ( mc , parms ) :
OOO0000o = mc . eid . print_prefix_no_iid ( )
os . system ( "ip route delete {}" . format ( OOO0000o ) )
return ( [ True , None ] )
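# User-initiated clear: rebuild an empty map-cache, clear the RLOC-probe list,
# per-RLOC crypto keys and RTR list, undo any hardware routes, and signal the data
# plane to restart.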
def lisp_clear_map_cache ( ) :
global lisp_map_cache , lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap , lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list
if 43 - 43: OoO0O00 - I1Ii111 % OoooooooOO % I1ii11iIi11i . OoOoOO00
oO000oO0O0 = bold ( "User cleared" , False )
i1Ii11II = lisp_map_cache . cache_count
lprint ( "{} map-cache with {} entries" . format ( oO000oO0O0 , i1Ii11II ) )
if 83 - 83: i1IIi - OOooOOo * iII111i . o0oOOo0O0Ooo - I1Ii111 % oO0o
if ( lisp_program_hardware ) :
lisp_map_cache . walk_cache ( lisp_clear_hardware_walk , None )
if 11 - 11: o0oOOo0O0Ooo . OoooooooOO - i1IIi
lisp_map_cache = lisp_cache ( )
lisp_rloc_probe_list = { }
lisp_crypto_keys_by_rloc_encap = { }
lisp_crypto_keys_by_rloc_decap = { }
lisp_rtr_list = { }
lisp_process_data_plane_restart ( True )
return
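# NAT-traversal: prepend an IPv4/UDP header (destination port 4342) to an
# RLOC-probe request or reply and data-encapsulate it to the RLOC, using the
# translated port when NAT state is known.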
def lisp_encapsulate_rloc_probe ( lisp_sockets , rloc , nat_info , packet ) :
if ( len ( lisp_sockets ) != 4 ) : return
if 5 - 5: o0oOOo0O0Ooo % oO0o / OoO0O00
IiiIIII1 = lisp_myrlocs [ 0 ]
OOOOO000oo0 = len ( packet ) + 28
oOo00OoO0O = struct . pack ( "BBHIBBHII" , 0x45 , 0 , socket . htons ( OOOOO000oo0 ) , 0 , 64 ,
17 , 0 , socket . htonl ( IiiIIII1 . address ) , socket . htonl ( rloc . address ) )
oOo00OoO0O = lisp_ip_checksum ( oOo00OoO0O )
if 91 - 91: I11i
OOOOo00oo00O = struct . pack ( "HHHH" , 0 , socket . htons ( LISP_CTRL_PORT ) ,
socket . htons ( OOOOO000oo0 - 20 ) , 0 )
packet = lisp_packet ( oOo00OoO0O + OOOOo00oo00O + packet )
packet . inner_dest . copy_address ( rloc )
packet . inner_dest . instance_id = 0xffffff
packet . inner_source . copy_address ( IiiIIII1 )
packet . inner_ttl = 64
packet . outer_dest . copy_address ( rloc )
packet . outer_source . copy_address ( IiiIIII1 )
packet . outer_version = packet . outer_dest . afi_to_version ( )
packet . outer_ttl = 64
packet . encap_port = nat_info . port if nat_info else LISP_DATA_PORT
if 53 - 53: OOooOOo . IiII % I11i - OoO0O00 - Oo0Ooo
ooOOo00o0ooO = red ( rloc . print_address_no_iid ( ) , False )
if ( nat_info ) :
ooOOO00000oo = " {}" . format ( blue ( nat_info . hostname , False ) )
oo00OO0Oooo = bold ( "RLOC-probe request" , False )
else :
ooOOO00000oo = ""
oo00OO0Oooo = bold ( "RLOC-probe reply" , False )
if 58 - 58: I1Ii111 / OoooooooOO . I11i % I1Ii111
if 8 - 8: Oo0Ooo % ooOoO0o / i11iIiiIii
lprint ( ( "Data encapsulate {} to {}{} port {} for " + "NAT-traversal" ) . format ( oo00OO0Oooo , ooOOo00o0ooO , ooOOO00000oo , packet . encap_port ) )
if ( packet . encode ( None ) == None ) : return
packet . print_packet ( "Send" , True )
if 23 - 23: OoOoOO00 * I1Ii111
IIiii1i1I1I = lisp_sockets [ 3 ]
packet . send_packet ( IIiii1i1I1I , packet . outer_dest )
del ( packet )
return
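# Return [interface, next-hop] pairs for the default route: 'route -n get default'
# on macOS, otherwise parsed from 'ip route' default entries (skipping metric'd routes).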
def lisp_get_default_route_next_hops ( ) :
if ( lisp_is_macos ( ) ) :
iiI1i = "route -n get default"
II1111 = commands . getoutput ( iiI1i ) . split ( "\n" )
Oo0O00ooo0O = II111IiiiI1 = None
for Iiooo000o0OoOo in II1111 :
if ( Iiooo000o0OoOo . find ( "gateway: " ) != - 1 ) : Oo0O00ooo0O = Iiooo000o0OoOo . split ( ": " ) [ 1 ]
if ( Iiooo000o0OoOo . find ( "interface: " ) != - 1 ) : II111IiiiI1 = Iiooo000o0OoOo . split ( ": " ) [ 1 ]
if 77 - 77: iIii1I11I1II1
return ( [ [ II111IiiiI1 , Oo0O00ooo0O ] ] )
iiI1i = "ip route | egrep 'default via'"
II1i1 = commands . getoutput ( iiI1i ) . split ( "\n" )
if 12 - 12: OoooooooOO
Oooo0Oo00O00 = [ ]
for II1i1iI in II1i1 :
if ( II1i1iI . find ( " metric " ) != - 1 ) : continue
Oo0O = II1i1iI . split ( " " )
try :
IiOO0 = Oo0O . index ( "via" ) + 1
if ( IiOO0 >= len ( Oo0O ) ) : continue
oOOOoOoO0o = Oo0O . index ( "dev" ) + 1
if ( oOOOoOoO0o >= len ( Oo0O ) ) : continue
except :
continue
if 91 - 91: iII111i . OOooOOo / iIii1I11I1II1 . Oo0Ooo . II111iiii . OoOoOO00
if 31 - 31: OoO0O00 . I1ii11iIi11i % I11i - II111iiii
Oooo0Oo00O00 . append ( [ Oo0O [ oOOOoOoO0o ] , Oo0O [ IiOO0 ] ] )
if 70 - 70: ooOoO0o - IiII - OoO0O00 / I11i
return ( Oooo0Oo00O00 )
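# Return the next-hop of the kernel host route for 'rloc', or None when no such route exists.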
def lisp_get_host_route_next_hop ( rloc ) :
iiI1i = "ip route | egrep '{} via'" . format ( rloc )
II1i1iI = commands . getoutput ( iiI1i ) . split ( " " )
if 33 - 33: iII111i
try : oo0OOo0O = II1i1iI . index ( "via" ) + 1
except : return ( None )
if 14 - 14: O0 * Oo0Ooo / i1IIi
if ( oo0OOo0O >= len ( II1i1iI ) ) : return ( None )
return ( II1i1iI [ oo0OOo0O ] )
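# Add or delete a /32 kernel host route for 'dest', optionally via next-hop 'nh'.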
def lisp_install_host_route ( dest , nh , install ) :
install = "add" if install else "delete"
IIiii1IiiIiii = "none" if nh == None else nh
if 96 - 96: I1IiiI
lprint ( "{} host-route {}, nh {}" . format ( install . title ( ) , dest , IIiii1IiiIiii ) )
if 3 - 3: OoooooooOO
if ( nh == None ) :
I1iIiI = "ip route {} {}/32" . format ( install , dest )
else :
I1iIiI = "ip route {} {}/32 via {}" . format ( install , dest , nh )
if 3 - 3: IiII / O0 * i11iIiiIii . iII111i - iIii1I11I1II1
os . system ( I1iIiI )
return
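# Write the supplied map-cache checkpoint entries to the checkpoint file, one per line.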
def lisp_checkpoint ( checkpoint_list ) :
if ( lisp_checkpoint_map_cache == False ) : return
if 40 - 40: OoOoOO00
Iiooo000o0OoOo = open ( lisp_checkpoint_filename , "w" )
for iiIIIIiI111 in checkpoint_list :
Iiooo000o0OoOo . write ( iiIIIIiI111 + "\n" )
if 60 - 60: IiII . i11iIiiIii * II111iiii . Ii1I
Iiooo000o0OoOo . close ( )
lprint ( "{} {} entries to file '{}'" . format ( bold ( "Checkpoint" , False ) ,
len ( checkpoint_list ) , lisp_checkpoint_filename ) )
return
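# Rebuild the map-cache from the checkpoint file: each 'eid rloc ...' line becomes a
# checkpoint-marked lisp_mapping (native-forward when no RLOCs are listed).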
def lisp_load_checkpoint ( ) :
if ( lisp_checkpoint_map_cache == False ) : return
if ( os . path . exists ( lisp_checkpoint_filename ) == False ) : return
if 37 - 37: I11i % I1IiiI
Iiooo000o0OoOo = open ( lisp_checkpoint_filename , "r" )
if 32 - 32: OOooOOo + OoooooooOO . IiII . Oo0Ooo * iII111i
i1Ii11II = 0
for iiIIIIiI111 in Iiooo000o0OoOo :
i1Ii11II += 1
Oo0ooo0Ooo = iiIIIIiI111 . split ( " rloc " )
ooo0o0 = [ ] if ( Oo0ooo0Ooo [ 1 ] in [ "native-forward\n" , "\n" ] ) else Oo0ooo0Ooo [ 1 ] . split ( ", " )
if 86 - 86: I1ii11iIi11i . iII111i + Ii1I - IiII / i11iIiiIii + OoOoOO00
if 50 - 50: o0oOOo0O0Ooo - IiII + OoOoOO00 - II111iiii
iiiI11II1IiIi = [ ]
for Oo0o0o0oo in ooo0o0 :
O0OO0O = lisp_rloc ( False )
Oo0O = Oo0o0o0oo . split ( " " )
O0OO0O . rloc . store_address ( Oo0O [ 0 ] )
O0OO0O . priority = int ( Oo0O [ 1 ] )
O0OO0O . weight = int ( Oo0O [ 2 ] )
iiiI11II1IiIi . append ( O0OO0O )
if 24 - 24: I1Ii111 - IiII % I1IiiI - OoooooooOO % Ii1I
if 56 - 56: I1ii11iIi11i
ooooOoo000O = lisp_mapping ( "" , "" , iiiI11II1IiIi )
if ( ooooOoo000O != None ) :
ooooOoo000O . eid . store_prefix ( Oo0ooo0Ooo [ 0 ] )
ooooOoo000O . checkpoint_entry = True
ooooOoo000O . map_cache_ttl = LISP_NMR_TTL * 60
if ( iiiI11II1IiIi == [ ] ) : ooooOoo000O . action = LISP_NATIVE_FORWARD_ACTION
ooooOoo000O . add_cache ( )
continue
if 40 - 40: OoooooooOO
if 100 - 100: IiII - I11i
i1Ii11II -= 1
if 79 - 79: iII111i % O0
if 73 - 73: Oo0Ooo
Iiooo000o0OoOo . close ( )
lprint ( "{} {} map-cache entries from file '{}'" . format (
bold ( "Loaded" , False ) , i1Ii11II , lisp_checkpoint_filename ) )
return
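# Append one map-cache entry to the checkpoint list as
# '<eid> rloc <addr> <priority> <weight>, ...' or '<eid> rloc native-forward'.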
def lisp_write_checkpoint_entry ( checkpoint_list , mc ) :
if ( lisp_checkpoint_map_cache == False ) : return
if 34 - 34: I1Ii111 % Ii1I / Oo0Ooo % ooOoO0o / i11iIiiIii * I1IiiI
iiIIIIiI111 = "{} rloc " . format ( mc . eid . print_prefix ( ) )
if 36 - 36: i11iIiiIii * i1IIi % iII111i . Oo0Ooo
for O0OO0O in mc . rloc_set :
if ( O0OO0O . rloc . is_null ( ) ) : continue
iiIIIIiI111 += "{} {} {}, " . format ( O0OO0O . rloc . print_address_no_iid ( ) ,
O0OO0O . priority , O0OO0O . weight )
if 54 - 54: o0oOOo0O0Ooo % i1IIi % I1ii11iIi11i . o0oOOo0O0Ooo / OoOoOO00
if 55 - 55: O0 / OoooooooOO % Ii1I * O0 + iIii1I11I1II1 . iIii1I11I1II1
if ( mc . rloc_set != [ ] ) :
iiIIIIiI111 = iiIIIIiI111 [ 0 : - 2 ]
elif ( mc . action == LISP_NATIVE_FORWARD_ACTION ) :
iiIIIIiI111 += "native-forward"
if 55 - 55: Ii1I . OoooooooOO % Ii1I . IiII
if 67 - 67: oO0o
checkpoint_list . append ( iiIIIIiI111 )
return
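# Return True when the data-plane named socket exists; otherwise log and return False.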
def lisp_check_dp_socket ( ) :
oO0O0oooo = lisp_ipc_dp_socket_name
if ( os . path . exists ( oO0O0oooo ) == False ) :
IiiiiI1i1iiIiIi = bold ( "does not exist" , False )
lprint ( "Socket '{}' {}" . format ( oO0O0oooo , IiiiiI1i1iiIiIi ) )
return ( False )
if 74 - 74: o0oOOo0O0Ooo
return ( True )
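# JSON-encode an IPC record and send it to the data plane over the named socket,
# logging on failure.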
def lisp_write_to_dp_socket ( entry ) :
try :
i11i11 = json . dumps ( entry )
oO0Ii1Ii = bold ( "Write IPC" , False )
lprint ( "{} record to named socket: '{}'" . format ( oO0Ii1Ii , i11i11 ) )
lisp_ipc_dp_socket . sendto ( i11i11 , lisp_ipc_dp_socket_name )
except :
lprint ( "Failed to write IPC record to named socket: '{}'" . format ( i11i11 ) )
if 20 - 20: I1Ii111 . II111iiii % II111iiii
return
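# After a key change for an RLOC, rewrite the IPC map-cache record of every EID in
# the probe list that uses that RLOC (keyed by address or address:port).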
def lisp_write_ipc_keys ( rloc ) :
ooOOo0o = rloc . rloc . print_address_no_iid ( )
Iiiii = rloc . translated_port
if ( Iiiii != 0 ) : ooOOo0o += ":" + str ( Iiiii )
if ( lisp_rloc_probe_list . has_key ( ooOOo0o ) == False ) : return
if 97 - 97: O0 / i11iIiiIii - o0oOOo0O0Ooo - OoOoOO00 . oO0o
for Oo0O , Oo0ooo0Ooo , o0 in lisp_rloc_probe_list [ ooOOo0o ] :
ooooOoo000O = lisp_map_cache . lookup_cache ( Oo0ooo0Ooo , True )
if ( ooooOoo000O == None ) : continue
lisp_write_ipc_map_cache ( True , ooooOoo000O )
if 77 - 77: oO0o * oO0o . OoOoOO00 . i1IIi
return
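# ITR/RTR: build (and unless dont_send is set, send) the 'map-cache' add/delete IPC
# record for an entry: RLE nodes for multicast entries, otherwise each up IPv4/IPv6
# RLOC with its port, priority, weight and encryption keys.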
def lisp_write_ipc_map_cache ( add_or_delete , mc , dont_send = False ) :
if ( lisp_i_am_etr ) : return
if ( lisp_ipc_dp_socket == None ) : return
if ( lisp_check_dp_socket ( ) == False ) : return
Oo0oo0oOO0oOo = "add" if add_or_delete else "delete"
iiIIIIiI111 = { "type" : "map-cache" , "opcode" : Oo0oo0oOO0oOo }
if 59 - 59: iIii1I11I1II1 + i11iIiiIii * oO0o . Oo0Ooo . I1Ii111
O0OOo0OO0oOo = ( mc . group . is_null ( ) == False )
if ( O0OOo0OO0oOo ) :
iiIIIIiI111 [ "eid-prefix" ] = mc . group . print_prefix_no_iid ( )
iiIIIIiI111 [ "rles" ] = [ ]
else :
iiIIIIiI111 [ "eid-prefix" ] = mc . eid . print_prefix_no_iid ( )
iiIIIIiI111 [ "rlocs" ] = [ ]
if 49 - 49: II111iiii
iiIIIIiI111 [ "instance-id" ] = str ( mc . eid . instance_id )
if 99 - 99: Oo0Ooo . OOooOOo
if ( O0OOo0OO0oOo ) :
if ( len ( mc . rloc_set ) >= 1 and mc . rloc_set [ 0 ] . rle ) :
for I1I1iiI in mc . rloc_set [ 0 ] . rle . rle_forwarding_list :
iIiIi1iI11iiI = I1I1iiI . address . print_address_no_iid ( )
Iiiii = str ( 4341 ) if I1I1iiI . translated_port == 0 else str ( I1I1iiI . translated_port )
if 85 - 85: OoOoOO00 . IiII + oO0o - II111iiii
Oo0O = { "rle" : iIiIi1iI11iiI , "port" : Iiiii }
i1iIII1i , oo0oO0OoO00 = I1I1iiI . get_encap_keys ( )
Oo0O = lisp_build_json_keys ( Oo0O , i1iIII1i , oo0oO0OoO00 , "encrypt-key" )
iiIIIIiI111 [ "rles" ] . append ( Oo0O )
if 89 - 89: Ii1I / Oo0Ooo * o0oOOo0O0Ooo / OoO0O00 + I11i
if 4 - 4: I11i
else :
for Oo0o0o0oo in mc . rloc_set :
if ( Oo0o0o0oo . rloc . is_ipv4 ( ) == False and Oo0o0o0oo . rloc . is_ipv6 ( ) == False ) :
continue
if 59 - 59: OoOoOO00 * I1ii11iIi11i / I1IiiI * II111iiii + OoOoOO00
if ( Oo0o0o0oo . up_state ( ) == False ) : continue
if 6 - 6: OoOoOO00 % oO0o + I11i * Ii1I
Iiiii = str ( 4341 ) if Oo0o0o0oo . translated_port == 0 else str ( Oo0o0o0oo . translated_port )
if 13 - 13: I1ii11iIi11i / Oo0Ooo - I1Ii111 * OoOoOO00
Oo0O = { "rloc" : Oo0o0o0oo . rloc . print_address_no_iid ( ) , "priority" :
str ( Oo0o0o0oo . priority ) , "weight" : str ( Oo0o0o0oo . weight ) , "port" :
Iiiii }
i1iIII1i , oo0oO0OoO00 = Oo0o0o0oo . get_encap_keys ( )
Oo0O = lisp_build_json_keys ( Oo0O , i1iIII1i , oo0oO0OoO00 , "encrypt-key" )
iiIIIIiI111 [ "rlocs" ] . append ( Oo0O )
if ( dont_send == False ) : lisp_write_to_dp_socket ( iiIIIIiI111 )
return ( iiIIIIiI111 )
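# ETR/RTR: send the decryption and ICV keys for an RLOC (address or address:port)
# to the data plane as a 'decap-keys' IPC record.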
def lisp_write_ipc_decap_key ( rloc_addr , keys ) :
if ( lisp_i_am_itr ) : return
if ( lisp_ipc_dp_socket == None ) : return
if ( lisp_check_dp_socket ( ) == False ) : return
if ( keys == None or len ( keys ) == 0 or keys [ 1 ] == None ) : return
if 79 - 79: iII111i
i1iIII1i = keys [ 1 ] . encrypt_key
oo0oO0OoO00 = keys [ 1 ] . icv_key
OO0oOOO00 = rloc_addr . split ( ":" )
if ( len ( OO0oOOO00 ) == 1 ) :
iiIIIIiI111 = { "type" : "decap-keys" , "rloc" : OO0oOOO00 [ 0 ] }
else :
iiIIIIiI111 = { "type" : "decap-keys" , "rloc" : OO0oOOO00 [ 0 ] , "port" : OO0oOOO00 [ 1 ] }
if 5 - 5: ooOoO0o . OoO0O00
iiIIIIiI111 = lisp_build_json_keys ( iiIIIIiI111 , i1iIII1i , oo0oO0OoO00 , "decrypt-key" )
if 40 - 40: iII111i
lisp_write_to_dp_socket ( iiIIIIiI111 )
return
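#------------------------------------------------------------------------------
#
# lisp_build_json_keys
#
# Helper that attaches a "keys" array to an IPC entry. Judging from the body,
# the result (when ekey is not None) is:
#
#   entry["keys"] = [{"key-id": "1", <key_type>: ekey, "icv-key": ikey}]
#
# where key_type is "encrypt-key" or "decrypt-key" depending on the caller.
#------------------------------------------------------------------------------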
def lisp_build_json_keys ( entry , ekey , ikey , key_type ) :
if ( ekey == None ) : return ( entry )
if 41 - 41: i1IIi
entry [ "keys" ] = [ ]
Iiii11 = { "key-id" : "1" , key_type : ekey , "icv-key" : ikey }
entry [ "keys" ] . append ( Iiii11 )
return ( entry )
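#------------------------------------------------------------------------------
#
# lisp_write_ipc_database_mappings
#
# ETR-only helper that appears to send two IPC messages to the data plane: a
# "database-mappings" record listing each IPv4/IPv6 database-mapping EID
# (instance-id plus EID-prefix), followed by an "etr-nat-port" record carrying
# the ephemeral port passed in.
#------------------------------------------------------------------------------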
def lisp_write_ipc_database_mappings ( ephem_port ) :
if ( lisp_i_am_etr == False ) : return
if ( lisp_ipc_dp_socket == None ) : return
if ( lisp_check_dp_socket ( ) == False ) : return
if 47 - 47: II111iiii * I1ii11iIi11i
if 70 - 70: I1ii11iIi11i - o0oOOo0O0Ooo
if 71 - 71: I1ii11iIi11i * i1IIi
if 67 - 67: I1ii11iIi11i % OoOoOO00 . iII111i / Ii1I . I1IiiI
iiIIIIiI111 = { "type" : "database-mappings" , "database-mappings" : [ ] }
if 48 - 48: IiII + II111iiii . I1IiiI % o0oOOo0O0Ooo
if 57 - 57: OOooOOo . I11i % OoOoOO00
if 68 - 68: iIii1I11I1II1 % I1ii11iIi11i % II111iiii / O0 + iII111i
if 78 - 78: iII111i - OOooOOo / I1Ii111
for iIiIIi1i in lisp_db_list :
if ( iIiIIi1i . eid . is_ipv4 ( ) == False and iIiIIi1i . eid . is_ipv6 ( ) == False ) : continue
I1IiIIIIii1 = { "instance-id" : str ( iIiIIi1i . eid . instance_id ) ,
"eid-prefix" : iIiIIi1i . eid . print_prefix_no_iid ( ) }
iiIIIIiI111 [ "database-mappings" ] . append ( I1IiIIIIii1 )
if 99 - 99: o0oOOo0O0Ooo . oO0o
lisp_write_to_dp_socket ( iiIIIIiI111 )
if 9 - 9: oO0o % OoooooooOO
if 62 - 62: OoO0O00 / OoOoOO00 / I1Ii111 + Oo0Ooo - Ii1I
if 72 - 72: OoO0O00 + I11i / iII111i % OOooOOo
if 5 - 5: oO0o % OOooOOo
if 95 - 95: OoOoOO00 + OoooooooOO - O0 + o0oOOo0O0Ooo
iiIIIIiI111 = { "type" : "etr-nat-port" , "port" : ephem_port }
lisp_write_to_dp_socket ( iiIIIIiI111 )
return
def lisp_write_ipc_interfaces ( ) :
if ( lisp_i_am_etr ) : return
if ( lisp_ipc_dp_socket == None ) : return
if ( lisp_check_dp_socket ( ) == False ) : return
if 31 - 31: Ii1I / Oo0Ooo - I1IiiI - I11i - i11iIiiIii
if 45 - 45: ooOoO0o - IiII / OoO0O00 / IiII
if 63 - 63: ooOoO0o . i11iIiiIii + iII111i . OoO0O00 / ooOoO0o % iII111i
if 23 - 23: iIii1I11I1II1 - ooOoO0o / I11i * I11i
iiIIIIiI111 = { "type" : "interfaces" , "interfaces" : [ ] }
if 62 - 62: OOooOOo - I1IiiI * oO0o + O0 / ooOoO0o * iIii1I11I1II1
for II111IiiiI1 in lisp_myinterfaces . values ( ) :
if ( II111IiiiI1 . instance_id == None ) : continue
I1IiIIIIii1 = { "interface" : II111IiiiI1 . device ,
"instance-id" : str ( II111IiiiI1 . instance_id ) }
iiIIIIiI111 [ "interfaces" ] . append ( I1IiIIIIii1 )
if 25 - 25: I1Ii111 % Oo0Ooo + OoO0O00 % OOooOOo
if 85 - 85: I1IiiI . i11iIiiIii - ooOoO0o * I11i * OoOoOO00 * I11i
lisp_write_to_dp_socket ( iiIIIIiI111 )
return
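#------------------------------------------------------------------------------
#
# lisp_parse_auth_key
#
# Parse an authentication-key configuration string into a dict keyed by
# key-id. Based on the parsing logic below, behaviour is roughly (illustrative
# values only):
#
#   lisp_parse_auth_key("secret")         -> {0: "secret"}
#   lisp_parse_auth_key("[1]foo[2]bar")   -> {1: "foo", 2: "bar"}
#
# A non-integer key-id makes the function return None.
#------------------------------------------------------------------------------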
def lisp_parse_auth_key ( value ) :
OOOo0O = value . split ( "[" )
i1i1 = { }
if ( len ( OOOo0O ) == 1 ) :
i1i1 [ 0 ] = value
return ( i1i1 )
if 2 - 2: i1IIi
if 60 - 60: OOooOOo + I1ii11iIi11i / OoOoOO00 * i1IIi / O0
for IIiIi1IIiI1i in OOOo0O :
if ( IIiIi1IIiI1i == "" ) : continue
oo0OOo0O = IIiIi1IIiI1i . find ( "]" )
OoooOOo0oOO = IIiIi1IIiI1i [ 0 : oo0OOo0O ]
try : OoooOOo0oOO = int ( OoooOOo0oOO )
except : return
if 24 - 24: Oo0Ooo . IiII % o0oOOo0O0Ooo . OOooOOo . I1IiiI + I1Ii111
i1i1 [ OoooOOo0oOO ] = IIiIi1IIiI1i [ oo0OOo0O + 1 : : ]
if 51 - 51: Oo0Ooo * I11i % i1IIi / iIii1I11I1II1 . OoooooooOO
return ( i1i1 )
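#------------------------------------------------------------------------------
#
# lisp_reassemble
#
# IPv4 reassembly for fragmented LISP-encapsulated packets. The code below
# keys a queue of fragments on the IP ident field, drops first fragments whose
# UDP ports show they are not LISP data (4341/8472/4789), and returns None
# until every fragment has arrived. When the set is complete it concatenates
# the payloads, clears the fragmentation fields, rewrites the total length,
# and recomputes the IP header checksum.
#------------------------------------------------------------------------------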
def lisp_reassemble ( packet ) :
ooO = socket . ntohs ( struct . unpack ( "H" , packet [ 6 : 8 ] ) [ 0 ] )
if 15 - 15: OoO0O00
if 37 - 37: OoO0O00 . OoooooooOO - OOooOOo
if 34 - 34: o0oOOo0O0Ooo + iIii1I11I1II1 / o0oOOo0O0Ooo / ooOoO0o
if 53 - 53: II111iiii / iIii1I11I1II1
if ( ooO == 0 or ooO == 0x4000 ) : return ( packet )
if 25 - 25: I1Ii111
if 58 - 58: OoOoOO00 * i1IIi
if 20 - 20: IiII
if 81 - 81: I1Ii111 . i1IIi / o0oOOo0O0Ooo
i1111I = socket . ntohs ( struct . unpack ( "H" , packet [ 4 : 6 ] ) [ 0 ] )
iiii1Iiii = socket . ntohs ( struct . unpack ( "H" , packet [ 2 : 4 ] ) [ 0 ] )
if 57 - 57: ooOoO0o
O0OOOoOOOO0 = ( ooO & 0x2000 == 0 and ( ooO & 0x1fff ) != 0 )
iiIIIIiI111 = [ ( ooO & 0x1fff ) * 8 , iiii1Iiii - 20 , packet , O0OOOoOOOO0 ]
if 9 - 9: o0oOOo0O0Ooo % i1IIi / OoO0O00 / OOooOOo + I1Ii111
if 80 - 80: Oo0Ooo . iIii1I11I1II1 . OoooooooOO % iII111i . oO0o
if 10 - 10: i11iIiiIii * OoooooooOO . i11iIiiIii
if 35 - 35: OOooOOo * OOooOOo + o0oOOo0O0Ooo / i1IIi - I11i
if 12 - 12: I1ii11iIi11i - i11iIiiIii + I1IiiI . Oo0Ooo
if 26 - 26: oO0o + I1Ii111 + IiII * o0oOOo0O0Ooo . oO0o
if 95 - 95: OoOoOO00 . I1Ii111 / Ii1I . I1Ii111 % OoO0O00
if 16 - 16: Ii1I / I1IiiI / I1IiiI - OoooooooOO
if ( ooO == 0x2000 ) :
O00o , o0o0ooOo00 = struct . unpack ( "HH" , packet [ 20 : 24 ] )
O00o = socket . ntohs ( O00o )
o0o0ooOo00 = socket . ntohs ( o0o0ooOo00 )
if ( o0o0ooOo00 not in [ 4341 , 8472 , 4789 ] and O00o != 4341 ) :
lisp_reassembly_queue [ i1111I ] = [ ]
iiIIIIiI111 [ 2 ] = None
if 13 - 13: OOooOOo / OoooooooOO
if 7 - 7: II111iiii - ooOoO0o
if 72 - 72: Ii1I
if 27 - 27: ooOoO0o / IiII + OoO0O00 + Ii1I % I1Ii111
if 86 - 86: O0 % i11iIiiIii - Ii1I * oO0o % OOooOOo * i1IIi
if 87 - 87: II111iiii
if ( lisp_reassembly_queue . has_key ( i1111I ) == False ) :
lisp_reassembly_queue [ i1111I ] = [ ]
if 53 - 53: OoOoOO00 * i11iIiiIii / I1Ii111
if 100 - 100: ooOoO0o + I1IiiI * oO0o + ooOoO0o
if 24 - 24: i11iIiiIii + ooOoO0o
if 80 - 80: IiII % I11i % oO0o
if 97 - 97: i1IIi * i11iIiiIii / Ii1I - I1IiiI % IiII
oo0Oo00oo0OoO0O0 = lisp_reassembly_queue [ i1111I ]
if 38 - 38: IiII . OoO0O00 * IiII % ooOoO0o * Ii1I / ooOoO0o
if 56 - 56: O0 / OoooooooOO / OoOoOO00
if 19 - 19: o0oOOo0O0Ooo / i11iIiiIii . i1IIi / Oo0Ooo / I1Ii111
if 83 - 83: iII111i % o0oOOo0O0Ooo * OoOoOO00
if 49 - 49: II111iiii / OoO0O00
if ( len ( oo0Oo00oo0OoO0O0 ) == 1 and oo0Oo00oo0OoO0O0 [ 0 ] [ 2 ] == None ) :
dprint ( "Drop non-LISP encapsulated fragment 0x{}" . format ( lisp_hex_string ( i1111I ) . zfill ( 4 ) ) )
if 69 - 69: Ii1I * II111iiii
return ( None )
if 24 - 24: I1Ii111 * I1ii11iIi11i . OOooOOo . I1IiiI - I1ii11iIi11i
if 56 - 56: I1IiiI * Oo0Ooo + OoO0O00 - oO0o * I1Ii111
if 68 - 68: ooOoO0o * i11iIiiIii * OOooOOo % iII111i
if 10 - 10: Ii1I / Oo0Ooo - i1IIi
if 11 - 11: I11i * iII111i
oo0Oo00oo0OoO0O0 . append ( iiIIIIiI111 )
oo0Oo00oo0OoO0O0 = sorted ( oo0Oo00oo0OoO0O0 )
if 28 - 28: II111iiii + IiII / Oo0Ooo * I1IiiI - OOooOOo
if 2 - 2: oO0o + I11i / I1Ii111 . I11i
if 59 - 59: Ii1I
if 47 - 47: iII111i % iII111i
iIiIi1iI11iiI = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
iIiIi1iI11iiI . address = socket . ntohl ( struct . unpack ( "I" , packet [ 12 : 16 ] ) [ 0 ] )
OOoO0 = iIiIi1iI11iiI . print_address_no_iid ( )
iIiIi1iI11iiI . address = socket . ntohl ( struct . unpack ( "I" , packet [ 16 : 20 ] ) [ 0 ] )
iIi1IooOoOOoo0 = iIiIi1iI11iiI . print_address_no_iid ( )
iIiIi1iI11iiI = red ( "{} -> {}" . format ( OOoO0 , iIi1IooOoOOoo0 ) , False )
if 33 - 33: OOooOOo % OoO0O00 - O0 + I1IiiI + i11iIiiIii
dprint ( "{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}" . format ( bold ( "Received" , False ) , " non-LISP encapsulated" if iiIIIIiI111 [ 2 ] == None else "" , iIiIi1iI11iiI , lisp_hex_string ( i1111I ) . zfill ( 4 ) ,
lisp_hex_string ( ooO ) . zfill ( 4 ) ) )
if 97 - 97: OOooOOo
if 87 - 87: iII111i
if 73 - 73: II111iiii
if 2 - 2: i1IIi % iII111i . oO0o / II111iiii * I1IiiI
if 17 - 17: O0 + iII111i + oO0o / iIii1I11I1II1 % oO0o
if ( oo0Oo00oo0OoO0O0 [ 0 ] [ 0 ] != 0 or oo0Oo00oo0OoO0O0 [ - 1 ] [ 3 ] == False ) : return ( None )
O00oooooOo0OO = oo0Oo00oo0OoO0O0 [ 0 ]
for O0O in oo0Oo00oo0OoO0O0 [ 1 : : ] :
ooO = O0O [ 0 ]
o00oO0O0O0 , IiI1IIiIiI1I = O00oooooOo0OO [ 0 ] , O00oooooOo0OO [ 1 ]
if ( o00oO0O0O0 + IiI1IIiIiI1I != ooO ) : return ( None )
O00oooooOo0OO = O0O
if 78 - 78: oO0o - II111iiii . II111iiii * I1Ii111 % O0 - iII111i
lisp_reassembly_queue . pop ( i1111I )
if 59 - 59: Oo0Ooo - IiII
if 6 - 6: OOooOOo - I1IiiI . IiII
if 40 - 40: II111iiii
if 13 - 13: OoOoOO00
if 23 - 23: Oo0Ooo / II111iiii % OOooOOo % iII111i - Oo0Ooo / OoO0O00
packet = oo0Oo00oo0OoO0O0 [ 0 ] [ 2 ]
for O0O in oo0Oo00oo0OoO0O0 [ 1 : : ] : packet += O0O [ 2 ] [ 20 : : ]
if 7 - 7: Ii1I / I11i / II111iiii % I11i * I11i + iIii1I11I1II1
dprint ( "{} fragments arrived for packet 0x{}, length {}" . format ( bold ( "All" , False ) , lisp_hex_string ( i1111I ) . zfill ( 4 ) , len ( packet ) ) )
if 6 - 6: iIii1I11I1II1 * oO0o - iIii1I11I1II1 . O0 . O0
if 96 - 96: I1Ii111 * II111iiii % i11iIiiIii - oO0o
if 32 - 32: i11iIiiIii * o0oOOo0O0Ooo . OoooooooOO / O0
if 14 - 14: i11iIiiIii . I1Ii111 % I1ii11iIi11i . I1ii11iIi11i % IiII
if 93 - 93: iIii1I11I1II1 / IiII
OOOOO000oo0 = socket . htons ( len ( packet ) )
oooooOOo0Oo = packet [ 0 : 2 ] + struct . pack ( "H" , OOOOO000oo0 ) + packet [ 4 : 6 ] + struct . pack ( "H" , 0 ) + packet [ 8 : 10 ] + struct . pack ( "H" , 0 ) + packet [ 12 : 20 ]
if 91 - 91: i11iIiiIii % ooOoO0o - iII111i * I1Ii111 . i11iIiiIii
if 1 - 1: IiII + iIii1I11I1II1 * I1ii11iIi11i - IiII - i1IIi
oooooOOo0Oo = lisp_ip_checksum ( oooooOOo0Oo )
return ( oooooOOo0Oo + packet [ 20 : : ] )
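#------------------------------------------------------------------------------
#
# lisp_get_crypto_decap_lookup_key
#
# Return the key used to index lisp_crypto_keys_by_rloc_decap for a source
# RLOC: first "<addr>:<port>", then "<addr>" alone, and finally, if an entry
# exists under some other "<addr>:<port>" for the same address, that entry is
# copied under the address-only key and that key is returned. Returns None
# when nothing matches.
#------------------------------------------------------------------------------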
def lisp_get_crypto_decap_lookup_key ( addr , port ) :
ooOOo0o = addr . print_address_no_iid ( ) + ":" + str ( port )
if ( lisp_crypto_keys_by_rloc_decap . has_key ( ooOOo0o ) ) : return ( ooOOo0o )
if 77 - 77: Ii1I - OoooooooOO . OoOoOO00
ooOOo0o = addr . print_address_no_iid ( )
if ( lisp_crypto_keys_by_rloc_decap . has_key ( ooOOo0o ) ) : return ( ooOOo0o )
if 93 - 93: OoooooooOO / I1Ii111
if 91 - 91: I1Ii111
if 18 - 18: ooOoO0o * I11i
if 53 - 53: I11i . i11iIiiIii - iIii1I11I1II1 / I1Ii111
if 86 - 86: i1IIi % OoO0O00 - OoooooooOO
for OO0Ii1iii1iIIII in lisp_crypto_keys_by_rloc_decap :
ii1iI1iI1 = OO0Ii1iii1iIIII . split ( ":" )
if ( len ( ii1iI1iI1 ) == 1 ) : continue
ii1iI1iI1 = ii1iI1iI1 [ 0 ] if len ( ii1iI1iI1 ) == 2 else ":" . join ( ii1iI1iI1 [ 0 : - 1 ] )
if ( ii1iI1iI1 == ooOOo0o ) :
i1iIi = lisp_crypto_keys_by_rloc_decap [ OO0Ii1iii1iIIII ]
lisp_crypto_keys_by_rloc_decap [ ooOOo0o ] = i1iIi
return ( ooOOo0o )
if 57 - 57: O0 - I1Ii111 . IiII
if 56 - 56: OoooooooOO
return ( None )
def lisp_build_crypto_decap_lookup_key ( addr , port ) :
addr = addr . print_address_no_iid ( )
o0oo000o = addr + ":" + str ( port )
if 68 - 68: OoO0O00 % I11i % IiII + Ii1I
if ( lisp_i_am_rtr ) :
if ( lisp_rloc_probe_list . has_key ( addr ) ) : return ( addr )
if 86 - 86: i1IIi / O0
if 64 - 64: I1Ii111 + O0 * IiII % OoOoOO00 % OOooOOo - iII111i
if 73 - 73: ooOoO0o + I1IiiI % oO0o . O0
if 18 - 18: o0oOOo0O0Ooo * I11i
if 24 - 24: oO0o / o0oOOo0O0Ooo + i1IIi
if 15 - 15: i11iIiiIii / O0
for iiI in lisp_nat_state_info . values ( ) :
for I1II1i1Ii1 in iiI :
if ( addr == I1II1i1Ii1 . address ) : return ( o0oo000o )
if 34 - 34: I1Ii111 . IiII % iII111i
if 94 - 94: OOooOOo % i11iIiiIii . OOooOOo
return ( addr )
if 55 - 55: OoOoOO00 . OoOoOO00 % o0oOOo0O0Ooo . I11i . I1ii11iIi11i - o0oOOo0O0Ooo
return ( o0oo000o )
def lisp_set_ttl ( lisp_socket , ttl ) :
try :
lisp_socket . setsockopt ( socket . SOL_IP , socket . IP_TTL , ttl )
except :
lprint ( "socket.setsockopt(IP_TTL) not supported" )
pass
if 73 - 73: II111iiii % i11iIiiIii * I1ii11iIi11i + O0
return
def lisp_is_rloc_probe_request ( lisp_type ) :
lisp_type = struct . unpack ( "B" , lisp_type ) [ 0 ]
return ( lisp_type == 0x12 )
def lisp_is_rloc_probe_reply ( lisp_type ) :
lisp_type = struct . unpack ( "B" , lisp_type ) [ 0 ]
return ( lisp_type == 0x28 )
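#------------------------------------------------------------------------------
#
# lisp_is_rloc_probe
#
# Given a pcap-captured IPv4 packet, decide whether it is an RLOC-probe
# Map-Request (type 0x12), a Map-Reply (type 0x28), or either, depending on
# the rr argument (0, 1, or -1). Non-matching packets come back unchanged as
# [packet, None, None, None]; probes sourced from a local address return
# [None, None, None, None]; otherwise the return value appears to be the
# control payload, the source RLOC string, the source port, and TTL minus one.
#------------------------------------------------------------------------------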
def lisp_is_rloc_probe ( packet , rr ) :
OOOOo00oo00O = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == 17 )
if ( OOOOo00oo00O == False ) : return ( [ packet , None , None , None ] )
if 16 - 16: OoOoOO00 * I1Ii111 - I1IiiI / I1Ii111
O00o = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
o0o0ooOo00 = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
OOooOOOOoo0o0 = ( socket . htons ( LISP_CTRL_PORT ) in [ O00o , o0o0ooOo00 ] )
if ( OOooOOOOoo0o0 == False ) : return ( [ packet , None , None , None ] )
if 10 - 10: II111iiii . O0
if ( rr == 0 ) :
oo00OO0Oooo = lisp_is_rloc_probe_request ( packet [ 28 ] )
if ( oo00OO0Oooo == False ) : return ( [ packet , None , None , None ] )
elif ( rr == 1 ) :
oo00OO0Oooo = lisp_is_rloc_probe_reply ( packet [ 28 ] )
if ( oo00OO0Oooo == False ) : return ( [ packet , None , None , None ] )
elif ( rr == - 1 ) :
oo00OO0Oooo = lisp_is_rloc_probe_request ( packet [ 28 ] )
if ( oo00OO0Oooo == False ) :
oo00OO0Oooo = lisp_is_rloc_probe_reply ( packet [ 28 ] )
if ( oo00OO0Oooo == False ) : return ( [ packet , None , None , None ] )
if 46 - 46: iIii1I11I1II1
if 8 - 8: I1ii11iIi11i % I11i - i1IIi . Oo0Ooo * I1Ii111
if 44 - 44: iII111i
if 56 - 56: II111iiii / Oo0Ooo % IiII * II111iiii - iIii1I11I1II1 + ooOoO0o
if 33 - 33: o0oOOo0O0Ooo . I11i / I1IiiI
if 29 - 29: o0oOOo0O0Ooo - ooOoO0o
oo = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
oo . address = socket . ntohl ( struct . unpack ( "I" , packet [ 12 : 16 ] ) [ 0 ] )
if 59 - 59: I11i / IiII * OoO0O00 / IiII . I1Ii111
if 82 - 82: OOooOOo . iIii1I11I1II1 + I1Ii111
if 14 - 14: IiII . i11iIiiIii
if 17 - 17: ooOoO0o % ooOoO0o * oO0o
if ( oo . is_local ( ) ) : return ( [ None , None , None , None ] )
if 8 - 8: ooOoO0o + OoO0O00 . II111iiii / iIii1I11I1II1 - OOooOOo
if 87 - 87: iIii1I11I1II1 . IiII % I1IiiI . OoO0O00 - I1Ii111
if 53 - 53: I1Ii111 % i11iIiiIii
if 99 - 99: I1IiiI - i1IIi * i11iIiiIii + OoO0O00
oo = oo . print_address_no_iid ( )
Iiiii = socket . ntohs ( struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ] )
Ii1 = struct . unpack ( "B" , packet [ 8 ] ) [ 0 ] - 1
packet = packet [ 28 : : ]
if 80 - 80: o0oOOo0O0Ooo . I11i % iIii1I11I1II1 + OoOoOO00
Oo0O = bold ( "Receive(pcap)" , False )
Iiooo000o0OoOo = bold ( "from " + oo , False )
i111 = lisp_format_packet ( packet )
lprint ( "{} {} bytes {} {}, packet: {}" . format ( Oo0O , len ( packet ) , Iiooo000o0OoOo , Iiiii , i111 ) )
if 87 - 87: I1Ii111 + II111iiii / I1ii11iIi11i + OoOoOO00
return ( [ packet , oo , Iiiii , Ii1 ] )
def lisp_ipc_write_xtr_parameters ( cp , dp ) :
if ( lisp_ipc_dp_socket == None ) : return
if 88 - 88: ooOoO0o . o0oOOo0O0Ooo . OOooOOo - I11i
oOooOOoo = { "type" : "xtr-parameters" , "control-plane-logging" : cp ,
"data-plane-logging" : dp , "rtr" : lisp_i_am_rtr }
if 76 - 76: IiII % I1IiiI . iII111i
lisp_write_to_dp_socket ( oOooOOoo )
return
def lisp_external_data_plane ( ) :
iiI1i = 'egrep "ipc-data-plane = yes" ./lisp.config'
if ( commands . getoutput ( iiI1i ) != "" ) : return ( True )
if 80 - 80: I1Ii111 % O0 - IiII / II111iiii + i1IIi
if ( os . getenv ( "LISP_RUN_LISP_XTR" ) != None ) : return ( True )
return ( False )
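#------------------------------------------------------------------------------
#
# lisp_process_data_plane_restart
#
# Handler for a "restart" message from an external data plane. It touches
# ./lisp.config (presumably to trigger a configuration reread) and then
# replays the entire map-cache to the data plane as an "entire-map-cache" IPC
# message; with do_clear=True an empty entry list is sent instead.
#------------------------------------------------------------------------------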
def lisp_process_data_plane_restart ( do_clear = False ) :
os . system ( "touch ./lisp.config" )
if 51 - 51: OoOoOO00 / iIii1I11I1II1 . oO0o - I1ii11iIi11i - OOooOOo
Oo0OO = { "type" : "entire-map-cache" , "entries" : [ ] }
if 94 - 94: I11i / iII111i + o0oOOo0O0Ooo - II111iiii . O0
if ( do_clear == False ) :
oO0 = Oo0OO [ "entries" ]
lisp_map_cache . walk_cache ( lisp_ipc_walk_map_cache , oO0 )
if 97 - 97: I1IiiI % iII111i * oO0o - i1IIi
if 7 - 7: oO0o / ooOoO0o / IiII - I1ii11iIi11i * IiII % O0
lisp_write_to_dp_socket ( Oo0OO )
return
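#------------------------------------------------------------------------------
#
# lisp_process_data_plane_stats
#
# Consume a "statistics" IPC message from the data plane and fold the counts
# into the matching map-cache RLOC entries. From the key checks below, the
# expected message is roughly (illustrative):
#
#   {"type": "statistics",
#    "entries": [{"eid-prefix": "<prefix>", "instance-id": "<iid>",
#                 "rlocs": [{"rloc": "<addr>", "packet-count": <n>,
#                            "byte-count": <n>, "seconds-last-packet": <n>}]}]}
#
# Entries whose map-cache TTL has elapsed also trigger a refreshing
# Map-Request.
#------------------------------------------------------------------------------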
def lisp_process_data_plane_stats ( msg , lisp_sockets , lisp_port ) :
if ( msg . has_key ( "entries" ) == False ) :
lprint ( "No 'entries' in stats IPC message" )
return
if 6 - 6: I11i % iIii1I11I1II1 + I1Ii111
if ( type ( msg [ "entries" ] ) != list ) :
lprint ( "'entries' in stats IPC message must be an array" )
return
if 48 - 48: II111iiii . OOooOOo . ooOoO0o - iII111i
if 90 - 90: OOooOOo
for msg in msg [ "entries" ] :
if ( msg . has_key ( "eid-prefix" ) == False ) :
lprint ( "No 'eid-prefix' in stats IPC message" )
continue
if 43 - 43: IiII + ooOoO0o
oO00oo000O = msg [ "eid-prefix" ]
if 4 - 4: i1IIi
if ( msg . has_key ( "instance-id" ) == False ) :
lprint ( "No 'instance-id' in stats IPC message" )
continue
if 89 - 89: Oo0Ooo / iIii1I11I1II1 . OoOoOO00
II1 = int ( msg [ "instance-id" ] )
if 6 - 6: Ii1I / iII111i
if 69 - 69: iIii1I11I1II1 % I1Ii111 % OOooOOo + O0 - OoOoOO00 % oO0o
if 70 - 70: oO0o - I1IiiI + Ii1I
if 54 - 54: OoOoOO00 / ooOoO0o - I1IiiI
Oo00o = lisp_address ( LISP_AFI_NONE , "" , 0 , II1 )
Oo00o . store_prefix ( oO00oo000O )
ooooOoo000O = lisp_map_cache_lookup ( None , Oo00o )
if ( ooooOoo000O == None ) :
lprint ( "Map-cache entry for {} not found for stats update" . format ( oO00oo000O ) )
if 37 - 37: o0oOOo0O0Ooo
continue
if 57 - 57: iII111i / i1IIi / i1IIi + IiII
if 75 - 75: IiII / O0
if ( msg . has_key ( "rlocs" ) == False ) :
lprint ( "No 'rlocs' in stats IPC message for {}" . format ( oO00oo000O ) )
if 72 - 72: I11i
continue
if 35 - 35: I11i % OoooooooOO / i1IIi * i1IIi / I1IiiI
if ( type ( msg [ "rlocs" ] ) != list ) :
lprint ( "'rlocs' in stats IPC message must be an array" )
continue
if 42 - 42: I11i - i1IIi - oO0o / I11i + Ii1I + ooOoO0o
iIIOO0OO = msg [ "rlocs" ]
if 67 - 67: OoO0O00 . II111iiii * O0
if 1 - 1: o0oOOo0O0Ooo + Oo0Ooo
if 20 - 20: O0
if 77 - 77: I1ii11iIi11i + OoooooooOO * OoO0O00 * iIii1I11I1II1 % I1Ii111
for iIi1 in iIIOO0OO :
if ( iIi1 . has_key ( "rloc" ) == False ) : continue
if 3 - 3: ooOoO0o . Oo0Ooo . ooOoO0o / OoO0O00 / o0oOOo0O0Ooo . I1Ii111
ooOOo00o0ooO = iIi1 [ "rloc" ]
if ( ooOOo00o0ooO == "no-address" ) : continue
if 20 - 20: iII111i + II111iiii + i11iIiiIii
Oo0o0o0oo = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
Oo0o0o0oo . store_address ( ooOOo00o0ooO )
if 75 - 75: OoooooooOO
O0OO0O = ooooOoo000O . get_rloc ( Oo0o0o0oo )
if ( O0OO0O == None ) : continue
if 63 - 63: iII111i % oO0o . ooOoO0o * I1Ii111 + o0oOOo0O0Ooo * II111iiii
if 61 - 61: oO0o
if 45 - 45: I11i * OoOoOO00 % Oo0Ooo / iII111i
if 78 - 78: II111iiii
i1iI11ii = 0 if iIi1 . has_key ( "packet-count" ) == False else iIi1 [ "packet-count" ]
if 24 - 24: II111iiii + iII111i . I1Ii111
OO00o0oo0 = 0 if iIi1 . has_key ( "byte-count" ) == False else iIi1 [ "byte-count" ]
if 29 - 29: IiII + Oo0Ooo + iII111i / OoO0O00
OOOO0O00o = 0 if iIi1 . has_key ( "seconds-last-packet" ) == False else iIi1 [ "seconds-last-packet" ]
if 69 - 69: I1IiiI % I1IiiI . OoooooooOO - ooOoO0o / I11i
if 32 - 32: iIii1I11I1II1 % oO0o / I1Ii111
O0OO0O . stats . packet_count += i1iI11ii
O0OO0O . stats . byte_count += OO00o0oo0
O0OO0O . stats . last_increment = lisp_get_timestamp ( ) - OOOO0O00o
if 42 - 42: I11i / I1ii11iIi11i - I1IiiI * iII111i / I1IiiI / i11iIiiIii
lprint ( "Update stats {}/{}/{}s for {} RLOC {}" . format ( i1iI11ii , OO00o0oo0 ,
OOOO0O00o , oO00oo000O , ooOOo00o0ooO ) )
if 75 - 75: Oo0Ooo + IiII / I11i % I11i % IiII / I1Ii111
if 95 - 95: OoOoOO00
if 78 - 78: I11i
if 62 - 62: iIii1I11I1II1 . o0oOOo0O0Ooo . ooOoO0o % oO0o % O0 % oO0o
if 51 - 51: Oo0Ooo / IiII - Oo0Ooo
if ( ooooOoo000O . group . is_null ( ) and ooooOoo000O . has_ttl_elapsed ( ) ) :
oO00oo000O = green ( ooooOoo000O . print_eid_tuple ( ) , False )
lprint ( "Refresh map-cache entry {}" . format ( oO00oo000O ) )
lisp_send_map_request ( lisp_sockets , lisp_port , None , ooooOoo000O . eid , None )
if 71 - 71: I11i * I1ii11iIi11i * OOooOOo * o0oOOo0O0Ooo
if 53 - 53: I1IiiI % I1IiiI
return
def lisp_process_data_plane_decap_stats ( msg , lisp_ipc_socket ) :
if 95 - 95: OoO0O00 % iII111i / I1IiiI * OoooooooOO
if 31 - 31: iIii1I11I1II1
if 62 - 62: o0oOOo0O0Ooo - iII111i / II111iiii . o0oOOo0O0Ooo
if 20 - 20: iIii1I11I1II1 % OOooOOo
if 91 - 91: ooOoO0o
if ( lisp_i_am_itr ) :
lprint ( "Send decap-stats IPC message to lisp-etr process" )
oOooOOoo = "stats%{}" . format ( json . dumps ( msg ) )
oOooOOoo = lisp_command_ipc ( oOooOOoo , "lisp-itr" )
lisp_ipc ( oOooOOoo , lisp_ipc_socket , "lisp-etr" )
return
if 96 - 96: I1IiiI . OOooOOo
if 94 - 94: OoooooooOO + II111iiii % ooOoO0o - II111iiii / O0
if 34 - 34: IiII % oO0o
if 54 - 54: I1IiiI
if 80 - 80: OoOoOO00 . I1IiiI / I1ii11iIi11i . iII111i
if 31 - 31: I11i * o0oOOo0O0Ooo
if 17 - 17: Ii1I * iIii1I11I1II1
if 9 - 9: o0oOOo0O0Ooo - IiII
oOooOOoo = bold ( "IPC" , False )
lprint ( "Process decap-stats {} message: '{}'" . format ( oOooOOoo , msg ) )
if 78 - 78: i11iIiiIii . o0oOOo0O0Ooo
if ( lisp_i_am_etr ) : msg = json . loads ( msg )
if 72 - 72: Oo0Ooo % II111iiii + O0 * OoOoOO00 - OOooOOo + I1Ii111
IiIii1ii = [ "good-packets" , "ICV-error" , "checksum-error" ,
"lisp-header-error" , "no-decrypt-key" , "bad-inner-version" ,
"outer-header-error" ]
if 62 - 62: iII111i
for II1IOOOoO0 in IiIii1ii :
i1iI11ii = 0 if msg . has_key ( II1IOOOoO0 ) == False else msg [ II1IOOOoO0 ] [ "packet-count" ]
if 79 - 79: I1Ii111 / I1ii11iIi11i * OoOoOO00 - iIii1I11I1II1
lisp_decap_stats [ II1IOOOoO0 ] . packet_count += i1iI11ii
if 98 - 98: i1IIi
OO00o0oo0 = 0 if msg . has_key ( II1IOOOoO0 ) == False else msg [ II1IOOOoO0 ] [ "byte-count" ]
if 19 - 19: OoO0O00 % I1ii11iIi11i + I1ii11iIi11i
lisp_decap_stats [ II1IOOOoO0 ] . byte_count += OO00o0oo0
if 3 - 3: i11iIiiIii - iIii1I11I1II1 / OoOoOO00
OOOO0O00o = 0 if msg . has_key ( II1IOOOoO0 ) == False else msg [ II1IOOOoO0 ] [ "seconds-last-packet" ]
if 34 - 34: I1IiiI . IiII / ooOoO0o + I1Ii111 / iIii1I11I1II1 + OoooooooOO
lisp_decap_stats [ II1IOOOoO0 ] . last_increment = lisp_get_timestamp ( ) - OOOO0O00o
if 80 - 80: OoO0O00 - OoOoOO00 % i1IIi / iIii1I11I1II1 . I11i - I11i
return
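#------------------------------------------------------------------------------
#
# lisp_process_punt
#
# Read a JSON message from the punt socket and dispatch on its "type" field:
# "statistics" and "decap-statistics" update counters, "restart" replays the
# map-cache, and "discovery" (with "interface", "instance-id" and optional
# "source-eid"/"dest-eid") drives dynamic-EID discovery and, when no map-cache
# entry exists, a rate-limited Map-Request for the destination EID.
#------------------------------------------------------------------------------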
def lisp_process_punt ( punt_socket , lisp_send_sockets , lisp_ephem_port ) :
i1II111ii1i , oo = punt_socket . recvfrom ( 4000 )
if 8 - 8: i1IIi
IIOoOO = json . loads ( i1II111ii1i )
if ( type ( IIOoOO ) != dict ) :
lprint ( "Invalid punt message from {}, not in JSON format" . format ( oo ) )
if 61 - 61: i11iIiiIii * Ii1I % iII111i - Ii1I * O0
return
if 39 - 39: iII111i + i1IIi * iII111i - iIii1I11I1II1
I1Ii = bold ( "Punt" , False )
lprint ( "{} message from '{}': '{}'" . format ( I1Ii , oo , IIOoOO ) )
if 95 - 95: o0oOOo0O0Ooo
if ( IIOoOO . has_key ( "type" ) == False ) :
lprint ( "Punt IPC message has no 'type' key" )
return
if 58 - 58: OOooOOo . II111iiii . I1Ii111 . I1IiiI * I11i
if 29 - 29: OOooOOo + Ii1I % Oo0Ooo % I1ii11iIi11i
if 72 - 72: IiII / II111iiii
if 25 - 25: i1IIi + OoOoOO00 + oO0o + OoooooooOO
if 21 - 21: I1ii11iIi11i
if ( IIOoOO [ "type" ] == "statistics" ) :
lisp_process_data_plane_stats ( IIOoOO , lisp_send_sockets , lisp_ephem_port )
return
if 60 - 60: i1IIi / OoO0O00 . Ii1I
if ( IIOoOO [ "type" ] == "decap-statistics" ) :
lisp_process_data_plane_decap_stats ( IIOoOO , punt_socket )
return
if 16 - 16: i11iIiiIii + OoOoOO00 % Oo0Ooo + I1ii11iIi11i * Ii1I / I1Ii111
if 26 - 26: iII111i
if 31 - 31: iII111i
if 45 - 45: OoO0O00
if 55 - 55: iIii1I11I1II1 % iIii1I11I1II1 + I11i - ooOoO0o + I1IiiI * O0
if ( IIOoOO [ "type" ] == "restart" ) :
lisp_process_data_plane_restart ( )
return
if 47 - 47: ooOoO0o + iIii1I11I1II1 * OOooOOo . I1IiiI . o0oOOo0O0Ooo
if 49 - 49: Oo0Ooo . OoOoOO00 * OOooOOo
if 86 - 86: IiII * OOooOOo + Ii1I
if 62 - 62: I11i
if 86 - 86: Oo0Ooo % II111iiii + I1Ii111 / I1ii11iIi11i
if ( IIOoOO [ "type" ] != "discovery" ) :
lprint ( "Punt IPC message has wrong format" )
return
if 15 - 15: I1IiiI / I1Ii111 % iII111i
if ( IIOoOO . has_key ( "interface" ) == False ) :
lprint ( "Invalid punt message from {}, required keys missing" . format ( oo ) )
if 57 - 57: I1Ii111 . iIii1I11I1II1 / Oo0Ooo / IiII / iII111i * OoOoOO00
return
if 35 - 35: i1IIi + I1Ii111 - ooOoO0o . I1ii11iIi11i + Oo0Ooo
if 43 - 43: oO0o . OoO0O00 * i1IIi
if 1 - 1: ooOoO0o / i1IIi
if 42 - 42: I1ii11iIi11i * ooOoO0o + OoOoOO00 % I1ii11iIi11i . IiII
if 75 - 75: OoO0O00 * i1IIi - OOooOOo % II111iiii % OoO0O00 - OoOoOO00
oO00O = IIOoOO [ "interface" ]
if ( oO00O == "" ) :
II1 = int ( IIOoOO [ "instance-id" ] )
if ( II1 == - 1 ) : return
else :
II1 = lisp_get_interface_instance_id ( oO00O , None )
if 75 - 75: I11i * IiII * ooOoO0o
if 31 - 31: Ii1I
if 72 - 72: OOooOOo * Ii1I % OoO0O00
if 72 - 72: OoOoOO00 + o0oOOo0O0Ooo - i1IIi - OoO0O00 % OoOoOO00
if 42 - 42: oO0o / i1IIi . IiII
oOoO = None
if ( IIOoOO . has_key ( "source-eid" ) ) :
IiIiii = IIOoOO [ "source-eid" ]
oOoO = lisp_address ( LISP_AFI_NONE , IiIiii , 0 , II1 )
if ( oOoO . is_null ( ) ) :
lprint ( "Invalid source-EID format '{}'" . format ( IiIiii ) )
return
if 12 - 12: i11iIiiIii . ooOoO0o
if 80 - 80: O0 / iIii1I11I1II1 % iII111i * ooOoO0o / i11iIiiIii . OoOoOO00
iII1I1iiII11I = None
if ( IIOoOO . has_key ( "dest-eid" ) ) :
oooi1IiIiiii = IIOoOO [ "dest-eid" ]
iII1I1iiII11I = lisp_address ( LISP_AFI_NONE , oooi1IiIiiii , 0 , II1 )
if ( iII1I1iiII11I . is_null ( ) ) :
lprint ( "Invalid dest-EID format '{}'" . format ( oooi1IiIiiii ) )
return
if 40 - 40: o0oOOo0O0Ooo / I1ii11iIi11i + I1IiiI / Oo0Ooo
if 83 - 83: i11iIiiIii
if 86 - 86: OoO0O00 * oO0o + ooOoO0o % iII111i
if 81 - 81: i11iIiiIii . II111iiii * I11i + Ii1I / O0 . Oo0Ooo
if 29 - 29: IiII - IiII - OoooooooOO . Ii1I % OoooooooOO - OoOoOO00
if 33 - 33: oO0o * OoO0O00 / i11iIiiIii - I1IiiI * OoO0O00
if 19 - 19: OoooooooOO
if 34 - 34: OoOoOO00 . oO0o
if ( oOoO ) :
Oo0ooo0Ooo = green ( oOoO . print_address ( ) , False )
iIiIIi1i = lisp_db_for_lookups . lookup_cache ( oOoO , False )
if ( iIiIIi1i != None ) :
if 53 - 53: oO0o + OoooooooOO * ooOoO0o
if 85 - 85: I1ii11iIi11i - o0oOOo0O0Ooo % o0oOOo0O0Ooo % iII111i * OoOoOO00
if 50 - 50: I1Ii111 + I1Ii111 + I11i - OoOoOO00
if 65 - 65: oO0o / I11i + iII111i - I1ii11iIi11i
if 80 - 80: II111iiii . i11iIiiIii
if ( iIiIIi1i . dynamic_eid_configured ( ) ) :
II111IiiiI1 = lisp_allow_dynamic_eid ( oO00O , oOoO )
if ( II111IiiiI1 != None and lisp_i_am_itr ) :
lisp_itr_discover_eid ( iIiIIi1i , oOoO , oO00O , II111IiiiI1 )
else :
lprint ( ( "Disallow dynamic source-EID {} " + "on interface {}" ) . format ( Oo0ooo0Ooo , oO00O ) )
if 66 - 66: ooOoO0o * iII111i * OOooOOo % OoO0O00 / I1ii11iIi11i
if 33 - 33: iIii1I11I1II1
if 52 - 52: iIii1I11I1II1 + O0
else :
lprint ( "Punt from non-EID source {}" . format ( Oo0ooo0Ooo ) )
if 84 - 84: OOooOOo / iII111i . I1IiiI / O0 % OOooOOo . iII111i
if 32 - 32: OoO0O00 + OoO0O00 % o0oOOo0O0Ooo / O0
if 29 - 29: iII111i % I1Ii111
if 95 - 95: OOooOOo - ooOoO0o % i1IIi / O0 % I11i . IiII
if 63 - 63: ooOoO0o
if 22 - 22: OOooOOo . i11iIiiIii + II111iiii - Oo0Ooo % i1IIi / o0oOOo0O0Ooo
if ( iII1I1iiII11I ) :
ooooOoo000O = lisp_map_cache_lookup ( oOoO , iII1I1iiII11I )
if ( ooooOoo000O == None or ooooOoo000O . action == LISP_SEND_MAP_REQUEST_ACTION ) :
if 90 - 90: IiII
if 38 - 38: i1IIi / ooOoO0o / I11i * I1ii11iIi11i / II111iiii . iIii1I11I1II1
if 52 - 52: I1ii11iIi11i % ooOoO0o * Ii1I * IiII + IiII / i11iIiiIii
if 51 - 51: iIii1I11I1II1 * o0oOOo0O0Ooo % o0oOOo0O0Ooo . Ii1I / OoooooooOO
if 23 - 23: oO0o * I1IiiI - oO0o - ooOoO0o . IiII / i11iIiiIii
if ( lisp_rate_limit_map_request ( oOoO , iII1I1iiII11I ) ) : return
lisp_send_map_request ( lisp_send_sockets , lisp_ephem_port ,
oOoO , iII1I1iiII11I , None )
else :
Oo0ooo0Ooo = green ( iII1I1iiII11I . print_address ( ) , False )
lprint ( "Map-cache entry for {} already exists" . format ( Oo0ooo0Ooo ) )
if 53 - 53: Ii1I * Ii1I . OoOoOO00 . OOooOOo / I1ii11iIi11i % O0
if 98 - 98: OOooOOo
return
def lisp_ipc_map_cache_entry ( mc , jdata ) :
iiIIIIiI111 = lisp_write_ipc_map_cache ( True , mc , dont_send = True )
jdata . append ( iiIIIIiI111 )
return ( [ True , jdata ] )
def lisp_ipc_walk_map_cache ( mc , jdata ) :
if 95 - 95: I1Ii111 * o0oOOo0O0Ooo + OoO0O00 % OoOoOO00 - ooOoO0o / OoOoOO00
if 45 - 45: OoooooooOO / oO0o / o0oOOo0O0Ooo + Ii1I + O0 . iII111i
if 34 - 34: iIii1I11I1II1 . o0oOOo0O0Ooo + ooOoO0o
if 96 - 96: O0 / ooOoO0o
if ( mc . group . is_null ( ) ) : return ( lisp_ipc_map_cache_entry ( mc , jdata ) )
if 82 - 82: OoO0O00 * OOooOOo * I11i * I1Ii111 % iIii1I11I1II1
if ( mc . source_cache == None ) : return ( [ True , jdata ] )
if 50 - 50: Ii1I * Ii1I % I11i / iIii1I11I1II1 / ooOoO0o / iII111i
if 91 - 91: Ii1I - O0 . I11i - OoooooooOO * IiII . II111iiii
if 38 - 38: I1IiiI + OoO0O00
if 11 - 11: iIii1I11I1II1 + i1IIi * IiII - Oo0Ooo
if 66 - 66: I1Ii111 . Ii1I / I1ii11iIi11i / iIii1I11I1II1 + O0 / i1IIi
jdata = mc . source_cache . walk_cache ( lisp_ipc_map_cache_entry , jdata )
return ( [ True , jdata ] )
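#------------------------------------------------------------------------------
#
# lisp_itr_discover_eid
#
# Record a newly seen dynamic EID in the database-mapping entry (or just
# refresh its last-packet timestamp) and notify the lisp-etr process with a
# "learn%<eid>%<interface>" IPC message, presumably so the EID can be
# registered.
#------------------------------------------------------------------------------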
def lisp_itr_discover_eid ( db , eid , input_interface , routed_interface ,
lisp_ipc_listen_socket ) :
oO00oo000O = eid . print_address ( )
if ( db . dynamic_eids . has_key ( oO00oo000O ) ) :
db . dynamic_eids [ oO00oo000O ] . last_packet = lisp_get_timestamp ( )
return
if 19 - 19: Ii1I * iIii1I11I1II1 * Oo0Ooo - i11iIiiIii * i11iIiiIii - OOooOOo
if 88 - 88: O0 . iIii1I11I1II1 . I1ii11iIi11i
if 80 - 80: oO0o / i1IIi * iIii1I11I1II1
if 38 - 38: Ii1I
if 20 - 20: iIii1I11I1II1 + Oo0Ooo - Ii1I / i11iIiiIii . OoO0O00
oOOo0oO = lisp_dynamic_eid ( )
oOOo0oO . dynamic_eid . copy_address ( eid )
oOOo0oO . interface = routed_interface
oOOo0oO . last_packet = lisp_get_timestamp ( )
oOOo0oO . get_timeout ( routed_interface )
db . dynamic_eids [ oO00oo000O ] = oOOo0oO
if 66 - 66: OoooooooOO - Ii1I / iII111i . I1IiiI + I1ii11iIi11i - I1Ii111
I1iI1IIi1 = ""
if ( input_interface != routed_interface ) :
I1iI1IIi1 = ", routed-interface " + routed_interface
if 58 - 58: oO0o - iIii1I11I1II1 * i11iIiiIii / i11iIiiIii % I11i
if 69 - 69: iII111i * i1IIi
oOOOoo0 = green ( oO00oo000O , False ) + bold ( " discovered" , False )
lprint ( "Dynamic-EID {} on interface {}{}, timeout {}" . format ( oOOOoo0 , input_interface , I1iI1IIi1 , oOOo0oO . timeout ) )
if 33 - 33: OoO0O00 . i11iIiiIii * II111iiii - Ii1I * IiII
if 45 - 45: OoO0O00
if 15 - 15: iII111i * o0oOOo0O0Ooo * Ii1I % IiII
if 31 - 31: ooOoO0o . IiII + I1ii11iIi11i * II111iiii * iII111i + Oo0Ooo
if 35 - 35: oO0o + I1ii11iIi11i / o0oOOo0O0Ooo
oOooOOoo = "learn%{}%{}" . format ( oO00oo000O , routed_interface )
oOooOOoo = lisp_command_ipc ( oOooOOoo , "lisp-itr" )
lisp_ipc ( oOooOOoo , lisp_ipc_listen_socket , "lisp-etr" )
return
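#------------------------------------------------------------------------------
#
# lisp_retry_decap_keys
#
# When an ICV check fails for addr_str and key searching is enabled, walk the
# other stored decap crypto keys whose index contains the same address, retry
# the ICV over (packet, iv) with each candidate, and switch the decap key for
# addr_str to the first one that verifies. Address strings containing a ":"
# are not searched.
#------------------------------------------------------------------------------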
def lisp_retry_decap_keys ( addr_str , packet , iv , packet_icv ) :
if ( lisp_search_decap_keys == False ) : return
if 63 - 63: oO0o + OoOoOO00
if 50 - 50: o0oOOo0O0Ooo / Oo0Ooo * ooOoO0o * Ii1I
if 97 - 97: I1IiiI / oO0o + I1Ii111 + I1Ii111
if 86 - 86: o0oOOo0O0Ooo % ooOoO0o + OoOoOO00 * ooOoO0o
if ( addr_str . find ( ":" ) != - 1 ) : return
if 20 - 20: Ii1I * iII111i / ooOoO0o
OOooOo00Ooo = lisp_crypto_keys_by_rloc_decap [ addr_str ]
if 18 - 18: Oo0Ooo * Ii1I / i11iIiiIii . OoO0O00 + OoooooooOO
for Iiii11 in lisp_crypto_keys_by_rloc_decap :
if 23 - 23: I1IiiI - I1ii11iIi11i . O0 . OoOoOO00 . OoO0O00
if 81 - 81: IiII * I11i - iIii1I11I1II1
if 41 - 41: oO0o * I11i + I1IiiI - OoO0O00
if 63 - 63: Oo0Ooo * Ii1I - Ii1I
if ( Iiii11 . find ( addr_str ) == - 1 ) : continue
if 76 - 76: OoO0O00 . IiII % iIii1I11I1II1 / I1IiiI + iIii1I11I1II1 . I1IiiI
if 57 - 57: IiII - i1IIi * ooOoO0o
if 5 - 5: oO0o . O0 * IiII / Ii1I + OoO0O00
if 75 - 75: OOooOOo * OoOoOO00
if ( Iiii11 == addr_str ) : continue
if 82 - 82: Ii1I
if 83 - 83: I1IiiI
if 22 - 22: IiII / Ii1I + I1Ii111 % iIii1I11I1II1
if 75 - 75: OoOoOO00 % OoOoOO00 % o0oOOo0O0Ooo % I1ii11iIi11i + IiII
iiIIIIiI111 = lisp_crypto_keys_by_rloc_decap [ Iiii11 ]
if ( iiIIIIiI111 == OOooOo00Ooo ) : continue
if 45 - 45: I11i - iIii1I11I1II1
if 20 - 20: OoOoOO00
if 84 - 84: OoOoOO00
if 59 - 59: Ii1I / I1Ii111 + i11iIiiIii
IiI11 = iiIIIIiI111 [ 1 ]
if ( packet_icv != IiI11 . do_icv ( packet , iv ) ) :
lprint ( "Test ICV with key {} failed" . format ( red ( Iiii11 , False ) ) )
continue
if 68 - 68: IiII
if 42 - 42: O0 . ooOoO0o + OOooOOo . iIii1I11I1II1 * OoO0O00 . iII111i
lprint ( "Changing decap crypto key to {}" . format ( red ( Iiii11 , False ) ) )
lisp_crypto_keys_by_rloc_decap [ addr_str ] = iiIIIIiI111
if 35 - 35: II111iiii + I11i
return
def lisp_decent_pull_xtr_configured ( ) :
return ( lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None )
def lisp_is_decent_dns_suffix ( dns_name ) :
if ( lisp_decent_dns_suffix == None ) : return ( False )
i1i1Ii = dns_name . split ( "." )
i1i1Ii = "." . join ( i1i1Ii [ 1 : : ] )
return ( i1i1Ii == lisp_decent_dns_suffix )
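#------------------------------------------------------------------------------
#
# lisp_get_decent_index
#
# Hash an EID-prefix into a LISP-Decent index. The computation below is
# simply, with prefix = eid.print_prefix():
#
#   index = int(hashlib.sha256(prefix).hexdigest(), 16) % lisp_decent_modulus
#
# and lisp_get_decent_dns_name() turns that into "<index>.<dns-suffix>" for
# the pull-based mapping system.
#------------------------------------------------------------------------------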
def lisp_get_decent_index ( eid ) :
oO00oo000O = eid . print_prefix ( )
iII1III11ii = hashlib . sha256 ( oO00oo000O ) . hexdigest ( )
oo0OOo0O = int ( iII1III11ii , 16 ) % lisp_decent_modulus
return ( oo0OOo0O )
def lisp_get_decent_dns_name ( eid ) :
oo0OOo0O = lisp_get_decent_index ( eid )
return ( str ( oo0OOo0O ) + "." + lisp_decent_dns_suffix )
def lisp_get_decent_dns_name_from_str ( iid , eid_str ) :
Oo00o = lisp_address ( LISP_AFI_NONE , eid_str , 0 , iid )
oo0OOo0O = lisp_get_decent_index ( Oo00o )
return ( str ( oo0OOo0O ) + "." + lisp_decent_dns_suffix )
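#------------------------------------------------------------------------------
#
# lisp_trace_append
#
# Append this node's hop record to the JSON carried in a LISP-Trace packet.
# Each record appears to include the node role (ITR/ETR/RTR), source and
# destination RLOCs, the hostname, an encap or decap timestamp and, when
# supplied, recent RLOC-probe RTTs and hop counts. When the destination RLOC
# is unknown the trace is returned to the originating RLOC instead of being
# forwarded, and at the terminating ETR the packet is turned around toward the
# source EID with lengths and checksums rewritten.
#------------------------------------------------------------------------------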
def lisp_trace_append ( packet , reason = None , ed = "encap" , lisp_socket = None ,
rloc_entry = None ) :
if 79 - 79: i1IIi . I1ii11iIi11i * o0oOOo0O0Ooo / I11i . I11i / ooOoO0o
ii = 28 if packet . inner_version == 4 else 48
OO00oo0 = packet . packet [ ii : : ]
OoOooO00 = lisp_trace ( )
if ( OoOooO00 . decode ( OO00oo0 ) == False ) :
lprint ( "Could not decode JSON portion of a LISP-Trace packet" )
return ( False )
if 58 - 58: Oo0Ooo % i11iIiiIii . Oo0Ooo / Oo0Ooo - I1IiiI . Ii1I
if 65 - 65: OoO0O00
I11iiiiiII1 = "?" if packet . outer_dest . is_null ( ) else packet . outer_dest . print_address_no_iid ( )
if 6 - 6: I1Ii111 + OoO0O00 + O0 * OoOoOO00 . iIii1I11I1II1 . I1Ii111
if 93 - 93: ooOoO0o % iIii1I11I1II1 + I1ii11iIi11i
if 74 - 74: OoOoOO00 + I1ii11iIi11i
if 82 - 82: II111iiii
if 55 - 55: I11i . iIii1I11I1II1 / Ii1I - OoO0O00 * I1ii11iIi11i % iIii1I11I1II1
if 48 - 48: ooOoO0o + Oo0Ooo / Oo0Ooo
if ( I11iiiiiII1 != "?" and packet . encap_port != LISP_DATA_PORT ) :
if ( ed == "encap" ) : I11iiiiiII1 += ":{}" . format ( packet . encap_port )
if 15 - 15: iIii1I11I1II1 . I1Ii111 * OoooooooOO * O0 % OOooOOo
if 53 - 53: Ii1I
if 63 - 63: I11i % OoOoOO00
if 46 - 46: iIii1I11I1II1 . II111iiii / OoooooooOO - ooOoO0o * iII111i
if 52 - 52: I11i + iII111i
iiIIIIiI111 = { }
iiIIIIiI111 [ "node" ] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else "RTR" if lisp_i_am_rtr else "?"
if 9 - 9: OoOoOO00 % II111iiii . I11i * Oo0Ooo
OoOo = packet . outer_source
if ( OoOo . is_null ( ) ) : OoOo = lisp_myrlocs [ 0 ]
iiIIIIiI111 [ "srloc" ] = OoOo . print_address_no_iid ( )
if 92 - 92: oO0o . II111iiii
if 4 - 4: IiII . i1IIi - i1IIi - O0 - OOooOOo * I1Ii111
if 67 - 67: i11iIiiIii % OoooooooOO - o0oOOo0O0Ooo + OoOoOO00 + OoooooooOO
if 66 - 66: OoOoOO00 . Ii1I / i11iIiiIii / ooOoO0o
if 76 - 76: OoO0O00 % OoO0O00 / I1ii11iIi11i * ooOoO0o * o0oOOo0O0Ooo - I1Ii111
if ( iiIIIIiI111 [ "node" ] == "ITR" and packet . inner_sport != LISP_TRACE_PORT ) :
iiIIIIiI111 [ "srloc" ] += ":{}" . format ( packet . inner_sport )
if 53 - 53: OoO0O00 % Oo0Ooo . i1IIi
if 34 - 34: Ii1I - o0oOOo0O0Ooo * i1IIi
iiIIIIiI111 [ "hn" ] = lisp_hostname
Iiii11 = ed + "-ts"
iiIIIIiI111 [ Iiii11 ] = lisp_get_timestamp ( )
if 7 - 7: OoO0O00 * I1ii11iIi11i / I1Ii111
if 98 - 98: II111iiii % I1ii11iIi11i
if 48 - 48: iII111i % oO0o + oO0o - Oo0Ooo . OOooOOo
if 38 - 38: iII111i
if 66 - 66: iII111i + Oo0Ooo + i1IIi * Oo0Ooo
if 18 - 18: O0 - IiII
if ( I11iiiiiII1 == "?" and iiIIIIiI111 [ "node" ] == "ETR" ) :
iIiIIi1i = lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( iIiIIi1i != None and len ( iIiIIi1i . rloc_set ) >= 1 ) :
I11iiiiiII1 = iIiIIi1i . rloc_set [ 0 ] . rloc . print_address_no_iid ( )
if 5 - 5: I1ii11iIi11i * iII111i + II111iiii * Oo0Ooo * O0 - I1IiiI
if 71 - 71: i11iIiiIii % I1IiiI + I1ii11iIi11i + II111iiii + OoooooooOO + oO0o
iiIIIIiI111 [ "drloc" ] = I11iiiiiII1
if 12 - 12: I1IiiI + I1Ii111
if 66 - 66: I1Ii111 + OOooOOo + I1Ii111 . OoooooooOO * oO0o / OoO0O00
if 74 - 74: O0 % OOooOOo * OoOoOO00 / oO0o - Oo0Ooo
if 79 - 79: Ii1I + IiII
if ( I11iiiiiII1 == "?" and reason != None ) :
iiIIIIiI111 [ "drloc" ] += " ({})" . format ( reason )
if 21 - 21: o0oOOo0O0Ooo * iII111i * o0oOOo0O0Ooo * o0oOOo0O0Ooo . Oo0Ooo
if 98 - 98: I1ii11iIi11i
if 58 - 58: IiII / i11iIiiIii % I11i
if 74 - 74: OoooooooOO - I1ii11iIi11i + OOooOOo % IiII . o0oOOo0O0Ooo
if 21 - 21: Ii1I
if ( rloc_entry != None ) :
iiIIIIiI111 [ "rtts" ] = rloc_entry . recent_rloc_probe_rtts
iiIIIIiI111 [ "hops" ] = rloc_entry . recent_rloc_probe_hops
if 72 - 72: I1Ii111 . OoooooooOO / I1Ii111 - Ii1I / I1ii11iIi11i * I1ii11iIi11i
if 72 - 72: IiII . Ii1I + OoooooooOO * OoOoOO00 + Oo0Ooo . iII111i
if 92 - 92: O0 * Ii1I - I1ii11iIi11i - IiII . OoO0O00 + I1IiiI
if 59 - 59: i1IIi * OOooOOo % Oo0Ooo
if 44 - 44: iIii1I11I1II1 . OOooOOo
if 57 - 57: II111iiii + I1Ii111
oOoO = packet . inner_source . print_address ( )
iII1I1iiII11I = packet . inner_dest . print_address ( )
if ( OoOooO00 . packet_json == [ ] ) :
i11i11 = { }
i11i11 [ "seid" ] = oOoO
i11i11 [ "deid" ] = iII1I1iiII11I
i11i11 [ "paths" ] = [ ]
OoOooO00 . packet_json . append ( i11i11 )
if 42 - 42: OoOoOO00 % O0
if 70 - 70: iIii1I11I1II1 * Oo0Ooo - I1IiiI / OoO0O00 + OoOoOO00
if 94 - 94: OoooooooOO + O0 * iIii1I11I1II1 * II111iiii
if 90 - 90: I11i + O0 / I1IiiI . oO0o / O0
if 46 - 46: O0 . O0 - oO0o . II111iiii * I1IiiI * Ii1I
if 10 - 10: i1IIi + i1IIi . i1IIi - I1IiiI - I1IiiI
for i11i11 in OoOooO00 . packet_json :
if ( i11i11 [ "deid" ] != iII1I1iiII11I ) : continue
i11i11 [ "paths" ] . append ( iiIIIIiI111 )
break
if 26 - 26: Ii1I * I11i / I11i
if 79 - 79: ooOoO0o / oO0o - oO0o / OoooooooOO
if 91 - 91: iIii1I11I1II1 - O0 * o0oOOo0O0Ooo * o0oOOo0O0Ooo . II111iiii
if 69 - 69: II111iiii - Oo0Ooo + i1IIi . II111iiii + o0oOOo0O0Ooo
if 20 - 20: OoooooooOO - OoO0O00 * ooOoO0o * OoOoOO00 / OOooOOo
if 64 - 64: O0 + iII111i / I11i * OoOoOO00 + o0oOOo0O0Ooo + I1Ii111
if 16 - 16: I11i
if 9 - 9: Ii1I / IiII * I11i - i11iIiiIii * I1ii11iIi11i / iII111i
oo0Oo00o000 = False
if ( len ( OoOooO00 . packet_json ) == 1 and iiIIIIiI111 [ "node" ] == "ETR" and
OoOooO00 . myeid ( packet . inner_dest ) ) :
i11i11 = { }
i11i11 [ "seid" ] = iII1I1iiII11I
i11i11 [ "deid" ] = oOoO
i11i11 [ "paths" ] = [ ]
OoOooO00 . packet_json . append ( i11i11 )
oo0Oo00o000 = True
if 17 - 17: II111iiii
if 29 - 29: o0oOOo0O0Ooo - iII111i
if 49 - 49: O0 . I1ii11iIi11i . OoOoOO00 . I1Ii111 % O0 . iIii1I11I1II1
if 19 - 19: iIii1I11I1II1
if 97 - 97: Ii1I . I11i / ooOoO0o + Oo0Ooo
if 100 - 100: iII111i / I1Ii111 % OoOoOO00 . O0 / OoOoOO00
OoOooO00 . print_trace ( )
OO00oo0 = OoOooO00 . encode ( )
if 81 - 81: OoO0O00 % i11iIiiIii / OoO0O00 + ooOoO0o
if 100 - 100: O0 . Oo0Ooo % Oo0Ooo % O0 / i11iIiiIii
if 56 - 56: IiII - OOooOOo - OoOoOO00 - I11i
if 57 - 57: i1IIi
if 41 - 41: I11i / Ii1I
if 1 - 1: II111iiii / iII111i
if 83 - 83: OoO0O00 / iII111i
if 59 - 59: I1Ii111 % OOooOOo . I1IiiI + I1ii11iIi11i % oO0o
oOoOOOO = OoOooO00 . packet_json [ 0 ] [ "paths" ] [ 0 ] [ "srloc" ]
if ( I11iiiiiII1 == "?" ) :
lprint ( "LISP-Trace return to sender RLOC {}" . format ( oOoOOOO ) )
OoOooO00 . return_to_sender ( lisp_socket , oOoOOOO , OO00oo0 )
return ( False )
if 27 - 27: OoOoOO00 . I11i - Ii1I
if 82 - 82: I1IiiI + OoOoOO00 . II111iiii / OoOoOO00 % OoOoOO00 . I1ii11iIi11i
if 19 - 19: iIii1I11I1II1 . iIii1I11I1II1 + OOooOOo - I1ii11iIi11i
if 59 - 59: i11iIiiIii / oO0o * IiII . o0oOOo0O0Ooo % Ii1I
if 95 - 95: OoooooooOO - I1IiiI * I1ii11iIi11i
if 52 - 52: oO0o % iII111i - I1IiiI - o0oOOo0O0Ooo
iIi1IIiIII1 = OoOooO00 . packet_length ( )
if 66 - 66: o0oOOo0O0Ooo - Oo0Ooo - OoooooooOO * o0oOOo0O0Ooo + I1Ii111
if 82 - 82: I11i * i1IIi / Ii1I + O0
if 85 - 85: O0 + oO0o / I1Ii111
if 65 - 65: o0oOOo0O0Ooo . Oo0Ooo . i1IIi / IiII . I11i . O0
if 69 - 69: Oo0Ooo - i11iIiiIii
if 87 - 87: Oo0Ooo % OOooOOo - Ii1I
I1i11i1i1iI = packet . packet [ 0 : ii ]
i111 = struct . pack ( "HH" , socket . htons ( iIi1IIiIII1 ) , 0 )
I1i11i1i1iI = I1i11i1i1iI [ 0 : ii - 4 ] + i111
if ( packet . inner_version == 6 and iiIIIIiI111 [ "node" ] == "ETR" and
len ( OoOooO00 . packet_json ) == 2 ) :
OOOOo00oo00O = I1i11i1i1iI [ ii - 8 : : ] + OO00oo0
OOOOo00oo00O = lisp_udp_checksum ( oOoO , iII1I1iiII11I , OOOOo00oo00O )
I1i11i1i1iI = I1i11i1i1iI [ 0 : ii - 8 ] + OOOOo00oo00O [ 0 : 8 ]
if 11 - 11: iIii1I11I1II1 % IiII . I11i
if 59 - 59: O0 + II111iiii + IiII % Oo0Ooo
if 71 - 71: oO0o
if 75 - 75: Oo0Ooo * oO0o + iIii1I11I1II1 / Oo0Ooo
if 51 - 51: Ii1I * Ii1I + iII111i * oO0o / OOooOOo - ooOoO0o
if 16 - 16: I1Ii111 + O0 - O0 * iIii1I11I1II1 / iII111i
if ( oo0Oo00o000 ) :
if ( packet . inner_version == 4 ) :
I1i11i1i1iI = I1i11i1i1iI [ 0 : 12 ] + I1i11i1i1iI [ 16 : 20 ] + I1i11i1i1iI [ 12 : 16 ] + I1i11i1i1iI [ 22 : 24 ] + I1i11i1i1iI [ 20 : 22 ] + I1i11i1i1iI [ 24 : : ]
if 4 - 4: iII111i
else :
I1i11i1i1iI = I1i11i1i1iI [ 0 : 8 ] + I1i11i1i1iI [ 24 : 40 ] + I1i11i1i1iI [ 8 : 24 ] + I1i11i1i1iI [ 42 : 44 ] + I1i11i1i1iI [ 40 : 42 ] + I1i11i1i1iI [ 44 : : ]
if 75 - 75: I1IiiI * IiII % OoO0O00 - ooOoO0o * iII111i
if 32 - 32: iII111i
i1 = packet . inner_dest
packet . inner_dest = packet . inner_source
packet . inner_source = i1
if 59 - 59: OoOoOO00 - I1Ii111
if 34 - 34: ooOoO0o . OoooooooOO / ooOoO0o + OoooooooOO
if 24 - 24: OoooooooOO * I1ii11iIi11i / O0 / Oo0Ooo * I1IiiI / ooOoO0o
if 33 - 33: Ii1I
if 20 - 20: Ii1I + I11i
ii = 2 if packet . inner_version == 4 else 4
oOoooOo0O0 = 20 + iIi1IIiIII1 if packet . inner_version == 4 else iIi1IIiIII1
O0Ooo000 = struct . pack ( "H" , socket . htons ( oOoooOo0O0 ) )
I1i11i1i1iI = I1i11i1i1iI [ 0 : ii ] + O0Ooo000 + I1i11i1i1iI [ ii + 2 : : ]
if 63 - 63: OoO0O00
if 66 - 66: iIii1I11I1II1
if 98 - 98: iII111i . oO0o % I1Ii111 + Oo0Ooo
if 83 - 83: Oo0Ooo % oO0o - iII111i
if ( packet . inner_version == 4 ) :
iI1I1iII1iII = struct . pack ( "H" , 0 )
I1i11i1i1iI = I1i11i1i1iI [ 0 : 10 ] + iI1I1iII1iII + I1i11i1i1iI [ 12 : : ]
O0Ooo000 = lisp_ip_checksum ( I1i11i1i1iI [ 0 : 20 ] )
I1i11i1i1iI = O0Ooo000 + I1i11i1i1iI [ 20 : : ]
if 49 - 49: oO0o / OoooooooOO . OoooooooOO
if 1 - 1: I1IiiI - O0
if 98 - 98: i11iIiiIii
if 52 - 52: iIii1I11I1II1 - OoO0O00 * Ii1I - i11iIiiIii
if 88 - 88: o0oOOo0O0Ooo - I1IiiI / I1IiiI
packet . packet = I1i11i1i1iI + OO00oo0
return ( True )
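#------------------------------------------------------------------------------
#
# lisp_allow_gleaning
#
# Check an (EID, RLOC) pair against the configured lisp_glean_mappings policy
# entries, matching on instance-id range, EID-prefix and RLOC-prefix when
# present. Returns a (gleaning-allowed, rloc-probe-requested) pair of
# booleans; an empty policy list means (False, False).
#------------------------------------------------------------------------------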
def lisp_allow_gleaning ( eid , rloc ) :
if ( lisp_glean_mappings == [ ] ) : return ( False , False )
if 13 - 13: I1IiiI + ooOoO0o * II111iiii
for iiIIIIiI111 in lisp_glean_mappings :
if ( iiIIIIiI111 . has_key ( "instance-id" ) ) :
II1 = eid . instance_id
ii11 , OO0o = iiIIIIiI111 [ "instance-id" ]
if ( II1 < ii11 or II1 > OO0o ) : continue
if 32 - 32: iIii1I11I1II1 + O0 + i1IIi
if ( iiIIIIiI111 . has_key ( "eid-prefix" ) ) :
Oo0ooo0Ooo = copy . deepcopy ( iiIIIIiI111 [ "eid-prefix" ] )
Oo0ooo0Ooo . instance_id = eid . instance_id
if ( eid . is_more_specific ( Oo0ooo0Ooo ) == False ) : continue
if 28 - 28: IiII + I11i
if ( iiIIIIiI111 . has_key ( "rloc-prefix" ) ) :
if ( rloc != None and rloc . is_more_specific ( iiIIIIiI111 [ "rloc-prefix" ] )
== False ) : continue
if 1 - 1: OoooooooOO - i11iIiiIii . OoooooooOO - o0oOOo0O0Ooo - OOooOOo * I1Ii111
return ( True , iiIIIIiI111 [ "rloc-probe" ] )
if 56 - 56: Ii1I . OoO0O00
return ( False , False )
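#------------------------------------------------------------------------------
#
# lisp_glean_map_cache
#
# Add or refresh a gleaned map-cache entry for an EID learned from the data
# plane. An existing entry appears to have its refresh time updated and is
# otherwise left alone when the RLOC and encapsulation port are unchanged; a
# changed RLOC/port, or a missing entry, results in a mapping with a single
# translated RLOC (priority 253) and the gleaned TTL.
#------------------------------------------------------------------------------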
def lisp_glean_map_cache(eid, rloc, encap_port):
    #
    # Add or refresh a gleaned map-cache entry for this EID, using the
    # packet's source RLOC and encapsulation port.
    #
    mc = lisp_map_cache.lookup_cache(eid, True)
    if (mc and len(mc.rloc_set) != 0):
        mc.last_refresh_time = lisp_get_timestamp()

        #
        # If the RLOC and encapsulation port are unchanged, nothing to do.
        #
        rloc_entry = mc.rloc_set[0]
        if (rloc_entry.rloc.is_exact_match(rloc) and
            rloc_entry.translated_port == encap_port): return

        eid_str = green(eid.print_address(), False)
        rloc_str = red(rloc.print_address_no_iid() + ":" + str(encap_port),
            False)
        lprint("Gleaned EID {} RLOC changed to {}".format(eid_str, rloc_str))
        rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
    else:
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(eid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        eid_str = green(eid.print_address(), False)
        rloc_str = red(rloc.print_address_no_iid() + ":" + str(encap_port),
            False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(eid_str,
            rloc_str))
        mc.add_cache()

    #
    # Build the gleaned RLOC with priority 253 and install it as the entry's
    # only RLOC.
    #
    gleaned_rloc = lisp_rloc()
    gleaned_rloc.store_translated_rloc(rloc, encap_port)
    gleaned_rloc.add_to_rloc_probe_list(mc.eid, mc.group)
    gleaned_rloc.priority = 253
    gleaned_rloc.mpriority = 255
    mc.rloc_set = [gleaned_rloc]
    mc.build_best_rloc_set()
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
engine.py
|
import rq
import time
import redis
import multiprocessing
from queue import Queue
from functools import partial
from threading import Thread, Lock
redis_conn = redis.Redis()
q = rq.Queue(connection=redis_conn)
def launch_task(func, *args, **kwargs):
"""Function to enqueue target function with arguments and return a job id
"""
job = q.enqueue(func, *args, **kwargs)
return job.get_id()
def queue_completed(tasks):
    """Blocking helper that polls until every job id in ``tasks`` appears in
    the queue's FinishedJobRegistry."""
    finished = rq.registry.FinishedJobRegistry(queue=q)
    for i in tasks:
        while i not in finished:
            time.sleep(1)
    return True
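# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): launch_task() and
# queue_completed() assume a Redis server reachable with redis.Redis()'s
# defaults and at least one `rq worker` process consuming the default queue.
# The task function (here a hypothetical `slow_double` in a hypothetical
# `tasks` module) must be importable by the worker, so it cannot be defined
# in __main__.
#
#     from tasks import slow_double          # hypothetical importable task
#
#     job_ids = [launch_task(slow_double, n) for n in range(3)]
#     queue_completed(job_ids)               # blocks until all jobs finish
# ---------------------------------------------------------------------------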
class Worker:
"""
Class that defines a wrapper that actions a target function over elements
presented in an iterable data structure in a sequential process.
This is also the base class in the module from which initial attributes are
inherited and used to establish wrappers that action concurrent and
parallel processes respectively.
See Also
--------
ConcurrentWorker
    Parallel
"""
def __init__(self, func, iterable_arg, *args, iterable=[], **kwargs):
"""
        Class initialiser to establish the target function, the iterable, and
        any positional or keyword arguments that should be passed to the
        function.
Parameters
----------
        func
            Any function that accepts a keyword argument whose values are
            stored in an iterable data structure.
iterable_arg : str
The keyword that references the iterable argument in the target
function.
        iterable : list
            An iterable containing elements that are individually passed to
            the target function as a keyword argument.
        *args, **kwargs
            Any positional or keyword arguments that the target function
            requires and that remain constant across calls.
Attributes
----------
func
The function with, if any, positional and keyword arguments
included.
        iterable
            The iterable passed above.
        iterable_arg
            The target function's keyword through which each element of the
            iterable is passed.
Examples
--------
>>> from copy import deepcopy
>>> from htp.api.oanda import Candles
>>> from htp.toolbox.dates import Select
>>> instrument = "AUD_JPY"
>>> func = Candles.to_df
>>> queryParameters = {"granularity": "D"}
>>> date_gen = Select().by_month(
... period=2, no_days=[5, 6], year_by_day=True)
>>> date_list = []
>>> for i in date_gen:
... queryParameters["from"] = i["from"]
... queryParameters["to"] = i["to"]
... date_list.append(deepcopy(queryParameters))
>>> d = Worker(func, "queryParameters", iterable=date_list,
... instrument=instrument)
>>> print(d.func)
functools.partial(<bound method Candles.to_df of <class \
'htp.api.oanda.Candles'>>, instrument='AUD_JPY')
"""
self.func = partial(func, *args, **kwargs)
self.iterable = iterable
self.iterable_arg = iterable_arg
@classmethod
def sync(cls, *args, **kwargs):
"""
        Execute the target function sequentially across the given iterable's
        elements, with the provided positional and keyword arguments.
Returns
-------
list
A list, where the elements represent the respective results from
calling the target function on each value stored in the iterable.
Examples
--------
>>> from copy import deepcopy
>>> from htp.api.oanda import Candles
>>> from htp.toolbox.dates import Select
>>> instrument = "AUD_JPY"
>>> func = Candles.to_df
>>> queryParameters = {"granularity": "D"}
>>> date_gen = Select().by_month(
... period=2, no_days=[5, 6], year_by_day=True)
>>> date_list = []
>>> for i in date_gen:
... queryParameters["from"] = i["from"]
... queryParameters["to"] = i["to"]
... date_list.append(deepcopy(queryParameters))
>>> d = Worker.sync(func, "queryParameters", iterable=date_list,
... instrument=instrument)
>>> print(d[1].head())
open high low close
2019-06-02 21:00:00 75.068 75.487 74.968 75.401
2019-06-03 21:00:00 75.396 75.696 75.082 75.606
2019-06-04 21:00:00 75.604 75.904 75.404 75.585
2019-06-05 21:00:00 75.594 75.776 75.280 75.628
2019-06-06 21:00:00 75.632 75.817 75.492 75.738
"""
k = cls(*args, **kwargs)
st = []
for i in iter(k.iterable):
st.append(k.func(**{k.iterable_arg: i}))
return st
class ConcurrentWorker(Worker):
"""
    Class that inherits from `Worker` and subsequently provides concurrent
processing functionality to a target function.
See Also
--------
Worker
"""
def __init__(self, func, iterable_arg, *args, **kwargs):
"""
Class initialiser that inherits from the `Worker` class and assigns
private attributes required to concurrently process a target function
across given elements in an iterable.
        Attributes
        ----------
        _lock : threading.Lock
            Lock instance created for guarding shared state between threads.
        _queue : queue.Queue
            Queue from which the worker threads consume the iterable's
            elements.
See Also
--------
Worker.__init__
"""
super().__init__(func, iterable_arg, *args, **kwargs)
self._lock = Lock()
self._queue = Queue()
    def _threader(self, st):
        """
        Threader function that feeds items from the queue to the target
        function.

        Parameters
        ----------
        st : list
            An empty list into which the returned values from the target
            function are stored.
        """
        while True:
            item = self._queue.get()
            if item is None:
                # Sentinel value: mark it done so queue.join() can complete,
                # then stop this worker thread.
                self._queue.task_done()
                break
            st.append(self.func(**{self.iterable_arg: item}))
            self._queue.task_done()
def crt(self):
"""
Function that assembles the required components to action the target
function concurrently against an iterable's elements stored in a queue.
Returns
-------
list
A list of elements, each the respective result of the target
function working on a given value present in the iterable.
"""
        threads = []
        results = []
        # Start a fixed pool of four worker threads that consume from the
        # queue.
        for i in range(4):
            thread = Thread(target=self._threader, args=(results,))
            thread.daemon = True
            thread.start()
            threads.append(thread)
        # Enqueue the work items, then one sentinel per thread so each worker
        # knows when to stop.
        for job in self.iterable:
            self._queue.put(job)
        for i in range(4):
            self._queue.put(None)
        # Wait for every worker thread to drain the queue and exit.
        for thread in threads:
            thread.join()
        return results
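# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): ConcurrentWorker
# feeds each element of `iterable` to the target function as the keyword
# named by `iterable_arg`, using four daemon threads. The `square` function
# below is a hypothetical stand-in for an I/O-bound task such as an API call.
#
#     def square(value):
#         return value * value
#
#     results = ConcurrentWorker(square, "value", iterable=[1, 2, 3]).crt()
#     # results holds [1, 4, 9] in completion order, which may differ from
#     # the input order when the target function does real I/O.
# ---------------------------------------------------------------------------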
class Parallel(Worker):
"""
    Class that inherits from `Worker` and subsequently provides parallel
processing functionality to a target function.
See Also
--------
Worker
"""
def __init__(self, func, iterable_arg, *args, **kwargs):
"""
        Parallel is a class that inherits from Worker for necessary
        attributes and then provides a pool of workers so that the passed
        task and arguments can be worked on in parallel.
See Also
--------
Worker.__init__
"""
super().__init__(func, iterable_arg, *args, **kwargs)
def _init_lock(self, l_):
"""
Lock initialiser used in the pool setup.
"""
global lock
lock = l_
def _arg_kw(self, func, k, iterable):
"""
        Internal helper function to pass the elements stored in an iterable as
        keyword arguments to the target function.
"""
return func(**{k: iterable})
@classmethod
def worker(cls, *args, lock_func=None, lock_arg=None, **kwargs):
"""
        Method to run the target function in parallel. The pool of workers is
        initialised with a lock that the target function can use to serialise
        logging.
Returns
-------
list
A list of elements, each the respective result of the target
function working on a given value present in the iterable.
"""
k = cls(*args, **kwargs)
if lock_func is None:
lock_func = k._init_lock
lock_arg = multiprocessing.Lock()
pool = multiprocessing.Pool(
processes=3, initializer=lock_func, initargs=(lock_arg,))
results = []
for i in pool.map(partial(k._arg_kw, k.func, k.iterable_arg),
k.iterable):
results.append(i)
pool.close()
pool.join()
return results
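# ---------------------------------------------------------------------------
# Design note and hedged sketch (not part of the original module): the pool
# is created with _init_lock as its initializer, which stores the passed
# multiprocessing.Lock in a module-level global named `lock` inside every
# worker process. A target function can therefore serialise access to a
# shared resource (the docstring suggests logging) roughly like this
# hypothetical example:
#
#     def fetch(queryParameters):
#         with lock:                         # global set by _init_lock
#             print("querying", queryParameters["from"])
#         ...
# ---------------------------------------------------------------------------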
if __name__ == "__main__":
import os
import pandas as pd
from loguru import logger
from copy import deepcopy
from pprint import pprint
from htp.api.oanda import Candles
from htp.toolbox.dates import Select
# f = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# logging.basicConfig(level=logging.INFO, format=f)
logger.enable("htp.api.oanda")
cf = os.path.join(os.path.dirname(__file__), "../..", "config.yaml")
instrument = "AUD_JPY"
func = Candles.to_df
queryParameters = {"granularity": "D"}
# date_gen = Select().by_month(period=5, no_days=[6], year_by_day=True)
date_gen = Select(
from_="2019-03-04 21:00:00", to="2019-06-15 22:00:00",
local_tz="America/New_York").by_month()
date_list = []
for i in date_gen:
queryParameters["from"] = i["from"]
queryParameters["to"] = i["to"]
date_list.append(deepcopy(queryParameters))
# sys.exit()
start_time = time.time()
d = Parallel.worker(
func, "queryParameters", iterable=date_list, configFile=cf,
instrument=instrument)
pprint(pd.concat(d, axis=0))
print("--- %s seconds ---" % (time.time() - start_time))
"""
start_time = time.time()
d = ConcurrentWorker(
func, "queryParameters", iterable=date_list, configFile=cf,
instrument=instrument).crt()
pprint(d)
print("--- %s seconds ---" % (time.time() - start_time))
start_time = time.time()
    d = Worker.sync(
        func, "queryParameters", iterable=date_list, configFile=cf,
        instrument=instrument)
pprint(d)
print("--- %s seconds ---" % (time.time() - start_time))
"""
|